/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
        mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
        mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;

        if (jea->key < jeb->key)
                return -1;

        if (jea->key > jeb->key)
                return 1;

        return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
        unsigned long size;

        size = (((unsigned long)stop - (unsigned long)start)
                        / sizeof(struct jump_entry));
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key, int enable);

void static_key_slow_inc(struct static_key *key)
{
        STATIC_KEY_CHECK_USE();
        if (atomic_inc_not_zero(&key->enabled))
                return;

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                if (!jump_label_get_branch_default(key))
                        jump_label_update(key, JUMP_LABEL_ENABLE);
                else
                        jump_label_update(key, JUMP_LABEL_DISABLE);
        }
        atomic_inc(&key->enabled);
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
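
/*
 * For reference, a minimal caller-side sketch of how a static key is
 * typically declared and flipped (not part of this file; it assumes the
 * static_key API of this kernel generation, and my_key / my_feature_set /
 * do_rare_work are hypothetical names):
 *
 *	static struct static_key my_key = STATIC_KEY_INIT_FALSE;
 *
 *	void hot_path(void)
 *	{
 *		if (static_key_false(&my_key))
 *			do_rare_work();
 *	}
 *
 *	void my_feature_set(bool on)
 *	{
 *		if (on)
 *			static_key_slow_inc(&my_key);
 *		else
 *			static_key_slow_dec(&my_key);
 *	}
 *
 * With a default-false key the branch site starts out as a nop and
 * static_key_slow_inc() patches it into a jump; the site is only
 * re-patched when the reference count moves between zero and one, nested
 * increments just bump the count.
 */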

static void __static_key_slow_dec(struct static_key *key,
                unsigned long rate_limit, struct delayed_work *work)
{
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
                return;
        }

        if (rate_limit) {
                atomic_inc(&key->enabled);
                schedule_delayed_work(work, rate_limit);
        } else {
                if (!jump_label_get_branch_default(key))
                        jump_label_update(key, JUMP_LABEL_DISABLE);
                else
                        jump_label_update(key, JUMP_LABEL_ENABLE);
        }
        jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);
        __static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
        STATIC_KEY_CHECK_USE();
        __static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
        STATIC_KEY_CHECK_USE();
        __static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
{
        STATIC_KEY_CHECK_USE();
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
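
/*
 * Likewise, a sketch of the deferred (rate-limited) variant, which batches
 * the expensive disable-side patching (not part of this file; my_dkey is a
 * hypothetical example):
 *
 *	static struct static_key_deferred my_dkey;
 *
 *	jump_label_rate_limit(&my_dkey, HZ);
 *	...
 *	static_key_slow_inc(&my_dkey.key);
 *	...
 *	static_key_slow_dec_deferred(&my_dkey);
 *
 * Increments take effect immediately; a deferred decrement schedules the
 * delayed work above instead of patching right away, so the actual
 * disable is postponed by roughly the configured timeout.
 */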

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
        if (entry->code <= (unsigned long)end &&
                entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
                return 1;

        return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
                struct jump_entry *iter_stop, void *start, void *end)
{
        struct jump_entry *iter;

        iter = iter_start;
        while (iter < iter_stop) {
                if (addr_conflict(iter, start, end))
                        return 1;
                iter++;
        }

        return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
                                                              enum jump_label_type type)
{
        arch_jump_label_transform(entry, type);
}

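/*
 * Patch every entry in [entry, stop) that belongs to @key.  The entry
 * table is sorted by key (see jump_label_sort_entries()), so iteration
 * stops at the first entry owned by a different key.
 */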
static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop, int enable)
{
        for (; (entry < stop) &&
              (entry->key == (jump_label_t)(unsigned long)key);
              entry++) {
                /*
                 * An entry->code of 0 marks entries in (freed) module init
                 * text; in addition, kernel_text_address() verifies we are
                 * not in core kernel init code.  See
                 * jump_label_invalidate_module_init().
                 */
                if (entry->code && kernel_text_address(entry->code))
                        arch_jump_label_transform(entry, enable);
        }
}

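/*
 * The patch direction folds together the key's compile-time default
 * branch and its current enabled state (an XOR, spelled out):
 *
 *	default true branch	enabled		resulting type
 *	false			false		JUMP_LABEL_DISABLE (nop)
 *	false			true		JUMP_LABEL_ENABLE  (jump)
 *	true			false		JUMP_LABEL_ENABLE  (jump)
 *	true			true		JUMP_LABEL_DISABLE (nop)
 */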
static enum jump_label_type jump_label_type(struct static_key *key)
{
        bool true_branch = jump_label_get_branch_default(key);
        bool state = static_key_enabled(key);

        if ((!true_branch && state) || (true_branch && !state))
                return JUMP_LABEL_ENABLE;

        return JUMP_LABEL_DISABLE;
}

void __init jump_label_init(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct static_key *key = NULL;
        struct jump_entry *iter;

        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                iterk = (struct static_key *)(unsigned long)iter->key;
                arch_jump_label_transform_static(iter, jump_label_type(iterk));
                if (iterk == key)
                        continue;

                key = iterk;
                /*
                 * The low bit of key->entries carries the
                 * JUMP_LABEL_TRUE_BRANCH flag, so add iter rather than
                 * assigning it, to preserve that flag.
                 */
                *((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
                key->next = NULL;
#endif
        }
        static_key_initialized = true;
        jump_label_unlock();
}

#ifdef CONFIG_MODULES

struct static_key_mod {
        struct static_key_mod *next;
        struct jump_entry *entries;
        struct module *mod;
};

static int __jump_label_mod_text_reserved(void *start, void *end)
{
        struct module *mod;

        mod = __module_text_address((unsigned long)start);
        if (!mod)
                return 0;

        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

        return __jump_label_text_reserved(mod->jump_entries,
                                mod->jump_entries + mod->num_jump_entries,
                                start, end);
}

static void __jump_label_mod_update(struct static_key *key, int enable)
{
        struct static_key_mod *mod = key->next;

        while (mod) {
                struct module *m = mod->mod;

                __jump_label_update(key, mod->entries,
                                    m->jump_entries + m->num_jump_entries,
                                    enable);
                mod = mod->next;
        }
}

/***
 * jump_label_apply_nops - patch module jump labels to the ideal nop
 * @mod: module whose jump entries should be patched
 *
 * Allow for run-time selection of the optimal nops.  Before the module
 * starts running, patch each of its jump entries into the disabled (nop)
 * state via arch_jump_label_transform_static(), which the arch-specific
 * jump label code provides.
 */
void jump_label_apply_nops(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return;

        for (iter = iter_start; iter < iter_stop; iter++) {
                arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
        }
}

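/*
 * Associate a loaded module's jump entries with their static keys.  Keys
 * defined by the module itself get key->entries pointed at the module's
 * (sorted) table; for keys owned by the core kernel or another module, a
 * static_key_mod record is chained onto key->next so jump_label_update()
 * can reach these entries as well.  Entries whose key is already enabled
 * are patched right away.
 */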
static int jump_label_add_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return 0;

        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                iterk = (struct static_key *)(unsigned long)iter->key;
                if (iterk == key)
                        continue;

                key = iterk;
                if (__module_address(iter->key) == mod) {
                        /*
                         * The low bit of key->entries carries the
                         * JUMP_LABEL_TRUE_BRANCH flag, so add iter rather
                         * than assigning it, to preserve that flag.
                         */
                        *((unsigned long *)&key->entries) += (unsigned long)iter;
                        key->next = NULL;
                        continue;
                }
                jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;
                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = key->next;
                key->next = jlm;

                if (jump_label_type(key) == JUMP_LABEL_ENABLE)
                        __jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
        }

        return 0;
}

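/*
 * Undo jump_label_add_module(): for every key the module references but
 * does not own, find the static_key_mod record that was chained onto
 * key->next for this module, unlink it and free it.
 */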
static void jump_label_del_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, **prev;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (iter->key == (jump_label_t)(unsigned long)key)
                        continue;

                key = (struct static_key *)(unsigned long)iter->key;

                if (__module_address(iter->key) == mod)
                        continue;

                prev = &key->next;
                jlm = key->next;

                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }

                if (jlm) {
                        *prev = jlm->next;
                        kfree(jlm);
                }
        }
}

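/*
 * Called when the module has gone live and its init sections are no
 * longer needed: zero the code address of any jump entry that points into
 * init text so that __jump_label_update() skips those sites from then on.
 */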
static void jump_label_invalidate_module_init(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (within_module_init(iter->code, mod))
                        iter->code = 0;
        }
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                         void *data)
{
        struct module *mod = data;
        int ret = 0;

        switch (val) {
        case MODULE_STATE_COMING:
                jump_label_lock();
                ret = jump_label_add_module(mod);
                if (ret)
                        jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_GOING:
                jump_label_lock();
                jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_LIVE:
                jump_label_lock();
                jump_label_invalidate_module_init(mod);
                jump_label_unlock();
                break;
        }

        return notifier_from_errno(ret);
}

struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if an address range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks whether the text address range between @start and @end overlaps
 * with any of the jump label patch addresses.  Code that wants to modify
 * kernel text should first verify that it does not overlap with any of
 * the jump label addresses.  Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
        int ret = __jump_label_text_reserved(__start___jump_table,
                        __stop___jump_table, start, end);

        if (ret)
                return ret;

#ifdef CONFIG_MODULES
        ret = __jump_label_mod_text_reserved(start, end);
#endif
        return ret;
}

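/*
 * Propagate a state change of @key to every site that uses it: the
 * built-in (or owning module's) entry table via key->entries, plus any
 * other modules chained on key->next.
 */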
static void jump_label_update(struct static_key *key, int enable)
{
        struct jump_entry *stop = __stop___jump_table;
        struct jump_entry *entry = jump_label_get_entries(key);

#ifdef CONFIG_MODULES
        struct module *mod = __module_address((unsigned long)key);

        __jump_label_mod_update(key, enable);

        if (mod)
                stop = mod->jump_entries + mod->num_jump_entries;
#endif
        /* if there are no users, entry can be NULL */
        if (entry)
                __jump_label_update(key, entry, stop, enable);
}

#endif