Root/
1 | /* |
2 | * jump label support |
3 | * |
4 | * Copyright (C) 2009 Jason Baron <jbaron@redhat.com> |
5 | * |
6 | */ |
7 | #include <linux/jump_label.h> |
8 | #include <linux/memory.h> |
9 | #include <linux/uaccess.h> |
10 | #include <linux/module.h> |
11 | #include <linux/list.h> |
12 | #include <linux/jhash.h> |
13 | #include <linux/slab.h> |
14 | #include <linux/sort.h> |
15 | #include <linux/err.h> |
16 | |
17 | #ifdef HAVE_JUMP_LABEL |
18 | |
/* Hash table mapping a jump label key to its jump_entry tables. */
#define JUMP_LABEL_HASH_BITS 6
#define JUMP_LABEL_TABLE_SIZE (1 << JUMP_LABEL_HASH_BITS)
static struct hlist_head jump_label_table[JUMP_LABEL_TABLE_SIZE];

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);
25 | |
/*
 * One hash table node per unique jump label key: the core kernel's
 * jump entries for that key, plus any module entries hung off it.
 */
struct jump_label_entry {
	struct hlist_node hlist;	/* link in a jump_label_table bucket */
	struct jump_entry *table;	/* core kernel entries for this key */
	int nr_entries;			/* number of entries in @table */
	/* hang modules off here */
	struct hlist_head modules;
	unsigned long key;		/* the jump label key (hash input) */
};
34 | |
/* One module's jump entries for a key, hung off a jump_label_entry. */
struct jump_label_module_entry {
	struct hlist_node hlist;	/* link in jump_label_entry->modules */
	struct jump_entry *table;	/* this module's entries for the key */
	int nr_entries;			/* number of entries in @table */
	struct module *mod;		/* owning module */
};
41 | |
/* Acquire the global jump label mutex. */
void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}
46 | |
/* Release the global jump label mutex. */
void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}
51 | |
52 | static int jump_label_cmp(const void *a, const void *b) |
53 | { |
54 | const struct jump_entry *jea = a; |
55 | const struct jump_entry *jeb = b; |
56 | |
57 | if (jea->key < jeb->key) |
58 | return -1; |
59 | |
60 | if (jea->key > jeb->key) |
61 | return 1; |
62 | |
63 | return 0; |
64 | } |
65 | |
66 | static void |
67 | sort_jump_label_entries(struct jump_entry *start, struct jump_entry *stop) |
68 | { |
69 | unsigned long size; |
70 | |
71 | size = (((unsigned long)stop - (unsigned long)start) |
72 | / sizeof(struct jump_entry)); |
73 | sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL); |
74 | } |
75 | |
/*
 * Look up the jump_label_entry for @key in the hash table.
 * Returns NULL if none exists.  Callers hold jump_label_mutex.
 */
static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct jump_label_entry *e;
	u32 hash = jhash((void *)&key, sizeof(jump_label_t), 0);

	head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (key == e->key)
			return e;
	}
	return NULL;
}
90 | |
91 | static struct jump_label_entry * |
92 | add_jump_label_entry(jump_label_t key, int nr_entries, struct jump_entry *table) |
93 | { |
94 | struct hlist_head *head; |
95 | struct jump_label_entry *e; |
96 | u32 hash; |
97 | |
98 | e = get_jump_label_entry(key); |
99 | if (e) |
100 | return ERR_PTR(-EEXIST); |
101 | |
102 | e = kmalloc(sizeof(struct jump_label_entry), GFP_KERNEL); |
103 | if (!e) |
104 | return ERR_PTR(-ENOMEM); |
105 | |
106 | hash = jhash((void *)&key, sizeof(jump_label_t), 0); |
107 | head = &jump_label_table[hash & (JUMP_LABEL_TABLE_SIZE - 1)]; |
108 | e->key = key; |
109 | e->table = table; |
110 | e->nr_entries = nr_entries; |
111 | INIT_HLIST_HEAD(&(e->modules)); |
112 | hlist_add_head(&e->hlist, head); |
113 | return e; |
114 | } |
115 | |
/*
 * Build the key -> jump_label_entry hash table from the core kernel
 * jump table in [start, stop).  The table is sorted first so entries
 * sharing a key are adjacent; each run of equal keys becomes one
 * jump_label_entry.  Returns 0 on success or a negative errno.
 * Called once at boot with jump_label_mutex held.
 */
static int
build_jump_label_hashtable(struct jump_entry *start, struct jump_entry *stop)
{
	struct jump_entry *iter, *iter_begin;
	struct jump_label_entry *entry;
	int count;

	sort_jump_label_entries(start, stop);
	iter = start;
	while (iter < stop) {
		entry = get_jump_label_entry(iter->key);
		if (!entry) {
			/* count the run of consecutive entries sharing this key */
			iter_begin = iter;
			count = 0;
			while ((iter < stop) &&
				(iter->key == iter_begin->key)) {
				iter++;
				count++;
			}
			entry = add_jump_label_entry(iter_begin->key,
							count, iter_begin);
			if (IS_ERR(entry))
				return PTR_ERR(entry);
		} else {
			/* sorted input cannot revisit a key; table is corrupt */
			WARN_ONCE(1, KERN_ERR "build_jump_hashtable: unexpected entry!\n");
			return -1;
		}
	}
	return 0;
}
146 | |
/***
 * jump_label_update - update jump label text
 * @key - key value associated with a jump label
 * @type - enum set to JUMP_LABEL_ENABLE or JUMP_LABEL_DISABLE
 *
 * Will enable/disable the jump for jump label @key, depending on the
 * value of @type.
 *
 */

void jump_label_update(unsigned long key, enum jump_label_type type)
{
	struct jump_entry *iter;
	struct jump_label_entry *entry;
	struct hlist_node *module_node;
	struct jump_label_module_entry *e_module;
	int count;

	jump_label_lock();
	entry = get_jump_label_entry((jump_label_t)key);
	if (entry) {
		/* patch the core kernel's entries for this key */
		count = entry->nr_entries;
		iter = entry->table;
		while (count--) {
			/* skip sites whose text is gone (e.g. freed init) */
			if (kernel_text_address(iter->code))
				arch_jump_label_transform(iter, type);
			iter++;
		}
		/* enable/disable jump labels in modules */
		hlist_for_each_entry(e_module, module_node, &(entry->modules),
				     hlist) {
			count = e_module->nr_entries;
			iter = e_module->table;
			while (count--) {
				/*
				 * key == 0 marks entries zeroed by
				 * remove_jump_label_module_init() when the
				 * module's init text was freed — skip them.
				 */
				if (iter->key &&
				    kernel_text_address(iter->code))
					arch_jump_label_transform(iter, type);
				iter++;
			}
		}
	}
	jump_label_unlock();
}
190 | |
191 | static int addr_conflict(struct jump_entry *entry, void *start, void *end) |
192 | { |
193 | if (entry->code <= (unsigned long)end && |
194 | entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start) |
195 | return 1; |
196 | |
197 | return 0; |
198 | } |
199 | |
200 | #ifdef CONFIG_MODULES |
201 | |
/*
 * Scan every module jump entry hanging off every hash bucket for a
 * patch site overlapping [start, end].  Returns 1 on overlap, 0
 * otherwise.  Core kernel entries are not checked here; they are
 * walked directly by jump_label_text_reserved().
 */
static int module_conflict(void *start, void *end)
{
	struct hlist_head *head;
	struct hlist_node *node, *node_next, *module_node, *module_node_next;
	struct jump_label_entry *e;
	struct jump_label_module_entry *e_module;
	struct jump_entry *iter;
	int i, count;
	int conflict = 0;

	for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
		head = &jump_label_table[i];
		hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
			hlist_for_each_entry_safe(e_module, module_node,
						  module_node_next,
						  &(e->modules), hlist) {
				count = e_module->nr_entries;
				iter = e_module->table;
				while (count--) {
					if (addr_conflict(iter, start, end)) {
						conflict = 1;
						goto out;
					}
					iter++;
				}
			}
		}
	}
out:
	return conflict;
}
233 | |
234 | #endif |
235 | |
236 | /*** |
237 | * jump_label_text_reserved - check if addr range is reserved |
238 | * @start: start text addr |
239 | * @end: end text addr |
240 | * |
241 | * checks if the text addr located between @start and @end |
242 | * overlaps with any of the jump label patch addresses. Code |
243 | * that wants to modify kernel text should first verify that |
244 | * it does not overlap with any of the jump label addresses. |
245 | * Caller must hold jump_label_mutex. |
246 | * |
247 | * returns 1 if there is an overlap, 0 otherwise |
248 | */ |
249 | int jump_label_text_reserved(void *start, void *end) |
250 | { |
251 | struct jump_entry *iter; |
252 | struct jump_entry *iter_start = __start___jump_table; |
253 | struct jump_entry *iter_stop = __start___jump_table; |
254 | int conflict = 0; |
255 | |
256 | iter = iter_start; |
257 | while (iter < iter_stop) { |
258 | if (addr_conflict(iter, start, end)) { |
259 | conflict = 1; |
260 | goto out; |
261 | } |
262 | iter++; |
263 | } |
264 | |
265 | /* now check modules */ |
266 | #ifdef CONFIG_MODULES |
267 | conflict = module_conflict(start, end); |
268 | #endif |
269 | out: |
270 | return conflict; |
271 | } |
272 | |
/*
 * Not all archs need this; this weak default is a no-op.  Architectures
 * that want an optimal nop patched at each jump site override it.
 */
void __weak arch_jump_label_text_poke_early(jump_label_t addr)
{
}
279 | |
280 | static __init int init_jump_label(void) |
281 | { |
282 | int ret; |
283 | struct jump_entry *iter_start = __start___jump_table; |
284 | struct jump_entry *iter_stop = __stop___jump_table; |
285 | struct jump_entry *iter; |
286 | |
287 | jump_label_lock(); |
288 | ret = build_jump_label_hashtable(__start___jump_table, |
289 | __stop___jump_table); |
290 | iter = iter_start; |
291 | while (iter < iter_stop) { |
292 | arch_jump_label_text_poke_early(iter->code); |
293 | iter++; |
294 | } |
295 | jump_label_unlock(); |
296 | return ret; |
297 | } |
298 | early_initcall(init_jump_label); |
299 | |
300 | #ifdef CONFIG_MODULES |
301 | |
302 | static struct jump_label_module_entry * |
303 | add_jump_label_module_entry(struct jump_label_entry *entry, |
304 | struct jump_entry *iter_begin, |
305 | int count, struct module *mod) |
306 | { |
307 | struct jump_label_module_entry *e; |
308 | |
309 | e = kmalloc(sizeof(struct jump_label_module_entry), GFP_KERNEL); |
310 | if (!e) |
311 | return ERR_PTR(-ENOMEM); |
312 | e->mod = mod; |
313 | e->nr_entries = count; |
314 | e->table = iter_begin; |
315 | hlist_add_head(&e->hlist, &entry->modules); |
316 | return e; |
317 | } |
318 | |
/*
 * Hook a newly loaded module's jump entries into the hash table.  The
 * module's table is sorted by key; each run of equal keys becomes one
 * jump_label_module_entry.  Keys unknown to the core kernel get an
 * anchor jump_label_entry with nr_entries == 0.  Returns 0 on success
 * or a negative errno; on failure the caller (the module notifier)
 * cleans up via remove_jump_label_module().
 */
static int add_jump_label_module(struct module *mod)
{
	struct jump_entry *iter, *iter_begin;
	struct jump_label_entry *entry;
	struct jump_label_module_entry *module_entry;
	int count;

	/* if the module doesn't have jump label entries, just return */
	if (!mod->num_jump_entries)
		return 0;

	sort_jump_label_entries(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries);
	iter = mod->jump_entries;
	while (iter < mod->jump_entries + mod->num_jump_entries) {
		entry = get_jump_label_entry(iter->key);
		/* count the run of consecutive entries sharing this key */
		iter_begin = iter;
		count = 0;
		while ((iter < mod->jump_entries + mod->num_jump_entries) &&
			(iter->key == iter_begin->key)) {
			iter++;
			count++;
		}
		if (!entry) {
			/* key unknown to the core kernel: anchor entry only */
			entry = add_jump_label_entry(iter_begin->key, 0, NULL);
			if (IS_ERR(entry))
				return PTR_ERR(entry);
		}
		module_entry = add_jump_label_module_entry(entry, iter_begin,
							   count, mod);
		if (IS_ERR(module_entry))
			return PTR_ERR(module_entry);
	}
	return 0;
}
354 | |
/*
 * Remove and free all of @mod's jump_label_module_entry structs.  Hash
 * entries that carry no core kernel entries (nr_entries == 0) and no
 * longer anchor any module are freed as well.
 */
static void remove_jump_label_module(struct module *mod)
{
	struct hlist_head *head;
	struct hlist_node *node, *node_next, *module_node, *module_node_next;
	struct jump_label_entry *e;
	struct jump_label_module_entry *e_module;
	int i;

	/* if the module doesn't have jump label entries, just return */
	if (!mod->num_jump_entries)
		return;

	for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
		head = &jump_label_table[i];
		hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
			hlist_for_each_entry_safe(e_module, module_node,
						  module_node_next,
						  &(e->modules), hlist) {
				if (e_module->mod == mod) {
					hlist_del(&e_module->hlist);
					kfree(e_module);
				}
			}
			/* drop anchor-only entries with no modules left */
			if (hlist_empty(&e->modules) && (e->nr_entries == 0)) {
				hlist_del(&e->hlist);
				kfree(e);
			}
		}
	}
}
385 | |
/*
 * Called when @mod goes MODULE_STATE_LIVE (init text about to be
 * freed): zero the key of each of @mod's jump entries that lives in
 * init memory, so jump_label_update() will skip patching them.
 */
static void remove_jump_label_module_init(struct module *mod)
{
	struct hlist_head *head;
	struct hlist_node *node, *node_next, *module_node, *module_node_next;
	struct jump_label_entry *e;
	struct jump_label_module_entry *e_module;
	struct jump_entry *iter;
	int i, count;

	/* if the module doesn't have jump label entries, just return */
	if (!mod->num_jump_entries)
		return;

	for (i = 0; i < JUMP_LABEL_TABLE_SIZE; i++) {
		head = &jump_label_table[i];
		hlist_for_each_entry_safe(e, node, node_next, head, hlist) {
			hlist_for_each_entry_safe(e_module, module_node,
						  module_node_next,
						  &(e->modules), hlist) {
				if (e_module->mod != mod)
					continue;
				count = e_module->nr_entries;
				iter = e_module->table;
				while (count--) {
					if (within_module_init(iter->code, mod))
						iter->key = 0;
					iter++;
				}
			}
		}
	}
}
418 | |
419 | static int |
420 | jump_label_module_notify(struct notifier_block *self, unsigned long val, |
421 | void *data) |
422 | { |
423 | struct module *mod = data; |
424 | int ret = 0; |
425 | |
426 | switch (val) { |
427 | case MODULE_STATE_COMING: |
428 | jump_label_lock(); |
429 | ret = add_jump_label_module(mod); |
430 | if (ret) |
431 | remove_jump_label_module(mod); |
432 | jump_label_unlock(); |
433 | break; |
434 | case MODULE_STATE_GOING: |
435 | jump_label_lock(); |
436 | remove_jump_label_module(mod); |
437 | jump_label_unlock(); |
438 | break; |
439 | case MODULE_STATE_LIVE: |
440 | jump_label_lock(); |
441 | remove_jump_label_module_init(mod); |
442 | jump_label_unlock(); |
443 | break; |
444 | } |
445 | return ret; |
446 | } |
447 | |
/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (!mod->num_jump_entries)
		return;

	iter = mod->jump_entries;
	while (iter < mod->jump_entries + mod->num_jump_entries) {
		arch_jump_label_text_poke_early(iter->code);
		iter++;
	}
}
470 | |
/* Notifier keeping the hash table in sync with module load/unload. */
struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 0,	/* default priority */
};
475 | |
/* Register the module notifier at early boot. */
static __init int init_jump_label_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
480 | early_initcall(init_jump_label_module); |
481 | |
482 | #endif /* CONFIG_MODULES */ |
483 | |
484 | #endif |
485 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9