/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time, by
running the various independent hardware delay and discovery operations
decoupled from one another rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that schedules asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel. (A usage sketch follows
this comment block.)

*/
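
/*
 * Usage sketch (illustrative only, not part of this file): my_dev,
 * my_dev_slow_hardware_init() and my_dev_register_device_numbers() are
 * hypothetical driver pieces; only the async_* calls are the real API.
 * The slow hardware work may run out of order, async_synchronize_cookie()
 * orders the externally visible registration, and async_synchronize_full()
 * keeps the init function ordered with drivers that do not use the
 * asynchronous call feature.
 *
 *	static struct my_dev my_dev;
 *
 *	static void my_dev_probe_async(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		my_dev_slow_hardware_init(dev);
 *
 *		async_synchronize_cookie(cookie);
 *		my_dev_register_device_numbers(dev);
 *	}
 *
 *	static int __init my_dev_init(void)
 *	{
 *		async_schedule(my_dev_probe_async, &my_dev);
 *
 *		async_synchronize_full();
 *		return 0;
 *	}
 */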

#include <linux/async.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>

static async_cookie_t next_cookie = 1;

#define MAX_WORK	32768

static LIST_HEAD(async_pending);
static LIST_HEAD(async_running);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
	struct list_head	list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_ptr		*func;
	void			*data;
	struct list_head	*running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

extern int initcall_debug;


/*
 * MUST be called with the lock held!
 */
static async_cookie_t __lowest_in_progress(struct list_head *running)
{
	struct async_entry *entry;

	if (!list_empty(running)) {
		entry = list_first_entry(running,
			struct async_entry, list);
		return entry->cookie;
	}

	list_for_each_entry(entry, &async_pending, list)
		if (entry->running == running)
			return entry->cookie;

	return next_cookie;	/* "infinity" value */
}

static async_cookie_t lowest_in_progress(struct list_head *running)
{
	unsigned long flags;
	async_cookie_t ret;

	spin_lock_irqsave(&async_lock, flags);
	ret = __lowest_in_progress(running);
	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * run a single async entry: move it to its running queue, call the
 * function, then remove and free the entry
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime, delta, rettime;

	/* 1) move self to the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_move_tail(&entry->list, entry->running);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		printk("initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);

	/* 4) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
	return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list for the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @running may be used in the async_synchronize_*_domain() functions
 * to wait within a certain synchronization domain rather than globally.
 * A synchronization domain is specified via @running, the running list to
 * use. Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
				     struct list_head *running)
{
	return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
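
/*
 * Domain usage sketch (illustrative only): my_subsys_domain, my_probe_async()
 * and my_subsys_slow_probe() are hypothetical. A subsystem keeps its own
 * running list so that it can wait for just its own asynchronous work with
 * the *_domain() variants instead of waiting for everything globally.
 *
 *	static LIST_HEAD(my_subsys_domain);
 *
 *	static void my_probe_async(void *data, async_cookie_t cookie)
 *	{
 *		my_subsys_slow_probe(data);
 *	}
 *
 *	static int __init my_subsys_init(void)
 *	{
 *		async_schedule_domain(my_probe_async, NULL, &my_subsys_domain);
 *
 *		async_synchronize_full_domain(&my_subsys_domain);
 *		return 0;
 *	}
 */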

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	do {
		async_synchronize_cookie(next_cookie);
	} while (!list_empty(&async_running) || !list_empty(&async_pending));
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @list: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @list have been done.
 */
void async_synchronize_full_domain(struct list_head *list)
{
	async_synchronize_cookie_domain(next_cookie, list);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @running submitted
 * prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie,
				     struct list_head *running)
{
	ktime_t starttime, delta, endtime;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk("async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		printk("async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);