Root/
1 | /* |
2 | * Generic infrastructure for lifetime debugging of objects. |
3 | * |
4 | * Started by Thomas Gleixner |
5 | * |
6 | * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de> |
7 | * |
8 | * For licencing details see kernel-base/COPYING |
9 | */ |
10 | #include <linux/debugobjects.h> |
11 | #include <linux/interrupt.h> |
12 | #include <linux/sched.h> |
13 | #include <linux/seq_file.h> |
14 | #include <linux/debugfs.h> |
15 | #include <linux/slab.h> |
16 | #include <linux/hash.h> |
17 | |
#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Pool is shrunk back to POOL_SIZE and refilled when it drops below
 * POOL_MIN_LEVEL. */
#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

/* Objects are hashed by the page-sized chunk their address falls in */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/* One hash bucket: the chain of tracked objects plus its lock */
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Static pool of tracker objects, used before the slab cache exists */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

/* Protects obj_pool and the obj_pool_* counters below */
static DEFINE_RAW_SPINLOCK(pool_lock);

/* Free list of tracker objects */
static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
/* Dedicated cache for tracker objects; NULL until mem_init() ran */
static struct kmem_cache	*obj_cache;

static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

/* Set while the selftest runs to suppress its expected warnings */
static struct debug_obj_descr	*descr_test  __read_mostly;

/* Deferred freeing of surplus pool objects; kmem_cache_free() cannot
 * be called from every context free_object() runs in. */
static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);
57 | |
/* Boot parameter handler: "debug_objects" switches tracking on.
 * @str is unused; the parameter takes no value. */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}
63 | |
/* Boot parameter handler: "no_debug_objects" switches tracking off.
 * @str is unused; the parameter takes no value. */
static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
72 | |
/* Human readable names for the object lifetime states, used in warnings */
static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
81 | |
/*
 * Refill the free object pool from the slab cache until it holds at
 * least ODEBUG_POOL_MIN_LEVEL entries. Returns the current fill level.
 */
static int fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	/*
	 * obj_pool_free is read without pool_lock: a stale value merely
	 * causes a harmless extra or skipped refill pass.
	 */
	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return obj_pool_free;

	/* Early boot: the dedicated cache does not exist yet */
	if (unlikely(!obj_cache))
		return obj_pool_free;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		/* Allocate outside pool_lock; bail out silently on failure */
		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return obj_pool_free;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
	return obj_pool_free;
}
107 | |
/*
 * Lookup an object in the hash bucket. Returns the tracker object for
 * @addr or NULL if none is known. Caller must hold b->lock.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct hlist_node *node;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, node, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	/* Track the longest fully-walked chain for the debugfs stats.
	 * Only updated on a miss, when the whole chain was traversed. */
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}
127 | |
/*
 * Allocate a new object from the pool, initialize it for @addr/@descr
 * and hook it into bucket @b. If the pool is empty, switch off the
 * debugger (caller's responsibility; this returns NULL).
 * Must be called with interrupts disabled and b->lock held.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj	    = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		/* Usage accounting, exported via debugfs stats */
		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}
160 | |
/*
 * Workqueue function to free objects: shrinks the pool back down to
 * ODEBUG_POOL_SIZE, one object at a time.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock. The object is already
		 * off the list, so nobody else can touch it.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}
184 | |
/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary. The object must already be unlinked from its bucket.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	/* schedule_work() outside the lock; it may sleep-lock internally */
	if (sched)
		schedule_work(&debug_obj_work);
}
208 | |
/*
 * We run out of memory. That means we probably have tons of objects
 * allocated. Drop all tracked objects back into the pool; the caller
 * has already disabled the debugger.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		/* Detach the whole chain under the lock ... */
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* ... then free the entries without holding it */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}
236 | |
237 | /* |
238 | * We use the pfn of the address for the hash. That way we can check |
239 | * for freed objects simply by checking the affected bucket. |
240 | */ |
241 | static struct debug_bucket *get_bucket(unsigned long addr) |
242 | { |
243 | unsigned long hash; |
244 | |
245 | hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS); |
246 | return &obj_hash[hash]; |
247 | } |
248 | |
/*
 * Emit a rate-limited warning (at most 5 system-wide) about an illegal
 * operation @msg on @obj. Warnings for the selftest descriptor are
 * suppressed, but still counted, so check_results() can verify them.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	static int limit;

	if (limit < 5 && obj->descr != descr_test) {
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
		       obj_states[obj->state], obj->descr->name);
	}
	debug_objects_warnings++;
}
260 | |
261 | /* |
262 | * Try to repair the damage, so we have a better chance to get useful |
263 | * debug output. |
264 | */ |
265 | static void |
266 | debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state), |
267 | void * addr, enum debug_obj_state state) |
268 | { |
269 | if (fixup) |
270 | debug_objects_fixups += fixup(addr, state); |
271 | } |
272 | |
/*
 * Warn (rate-limited to 5) when the stack-ness of @addr does not match
 * the API used: on-stack objects must go through the _on_stack init
 * variant (@onstack != 0) and vice versa.
 */
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}
294 | |
/*
 * Common implementation of debug_object_init() and
 * debug_object_init_on_stack(): track @addr (allocating a tracker
 * object on first sight) and transition it to ODEBUG_STATE_INIT.
 * @onstack tells whether the caller claims the object lives on a stack.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Top up the free pool before taking any bucket lock */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: disable tracking and drain */
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/* Re-init of a live object: warn, then let the type's
		 * fixup handler repair it outside the bucket lock. */
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
344 | |
345 | /** |
346 | * debug_object_init - debug checks when an object is initialized |
347 | * @addr: address of the object |
348 | * @descr: pointer to an object specific debug description structure |
349 | */ |
350 | void debug_object_init(void *addr, struct debug_obj_descr *descr) |
351 | { |
352 | if (!debug_objects_enabled) |
353 | return; |
354 | |
355 | __debug_object_init(addr, descr, 0); |
356 | } |
357 | |
358 | /** |
359 | * debug_object_init_on_stack - debug checks when an object on stack is |
360 | * initialized |
361 | * @addr: address of the object |
362 | * @descr: pointer to an object specific debug description structure |
363 | */ |
364 | void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) |
365 | { |
366 | if (!debug_objects_enabled) |
367 | return; |
368 | |
369 | __debug_object_init(addr, descr, 1); |
370 | } |
371 | |
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Legal transitions are INIT/INACTIVE -> ACTIVE. Activation of an
 * already active or destroyed object warns; activation of an untracked
 * object is forwarded to the type's fixup_activate handler, which
 * decides whether it is a legal statically initialized object.
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Fixup callback runs without the bucket lock */
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE);
}
425 | |
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Legal transitions are INIT/INACTIVE/ACTIVE -> INACTIVE. Deactivating
 * a destroyed or untracked object warns but is not fixed up.
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			obj->state = ODEBUG_STATE_INACTIVE;
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Untracked object: build a dummy tracker just to print
		 * a meaningful warning. */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
469 | |
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Legal transitions are NONE/INIT/INACTIVE -> DESTROYED. Destroying an
 * active object warns and is handed to fixup_destroy; a double destroy
 * only warns. Untracked objects are silently ignored.
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/* Fixup callback runs without the bucket lock */
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
515 | |
/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 *
 * Freeing an active object warns and goes through fixup_free; in all
 * other states the tracker is simply dropped and returned to the pool.
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		/* Fixup callback runs without the bucket lock */
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
555 | |
556 | #ifdef CONFIG_DEBUG_OBJECTS_FREE |
/*
 * Scan the freed memory range [address, address + size) chunk by chunk
 * for tracked objects. Active objects warn and are handed to their
 * type's fixup_free handler; all other trackers in the range are
 * dropped and returned to the pool.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	/* Number of chunk-aligned chunks overlapping the range,
	 * rounded up */
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

		/* Re-scanned from the start after every fixup, because
		 * the chain may have changed while the lock was dropped */
repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them, outside the bucket lock */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}
613 | |
614 | void debug_check_no_obj_freed(const void *address, unsigned long size) |
615 | { |
616 | if (debug_objects_enabled) |
617 | __debug_check_no_obj_freed(address, size); |
618 | } |
619 | #endif |
620 | |
621 | #ifdef CONFIG_DEBUG_FS |
622 | |
/*
 * seq_file show callback for /sys/kernel/debug/debug_objects/stats.
 * Counters are read without locking; they are diagnostics only.
 */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}
634 | |
/* open() callback: bind the stats seq_file show routine */
static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}
639 | |
/* File operations for the debugfs "stats" file */
static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
646 | |
/*
 * Create /sys/kernel/debug/debug_objects/stats. Skipped when the
 * debugger got disabled earlier during boot.
 */
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	/* Remove the directory again when the file creation failed */
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);
671 | |
672 | #else |
673 | static inline void debug_objects_init_debugfs(void) { } |
674 | #endif |
675 | |
676 | #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST |
677 | |
/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;	/* flags "statically initialized" */
	unsigned long	dummy2[3];
};

/* Forward declaration; the initializer follows the fixup handlers */
static __initdata struct debug_obj_descr descr_type_test;
686 | |
687 | /* |
688 | * fixup_init is called when: |
689 | * - an active object is initialized |
690 | */ |
691 | static int __init fixup_init(void *addr, enum debug_obj_state state) |
692 | { |
693 | struct self_test *obj = addr; |
694 | |
695 | switch (state) { |
696 | case ODEBUG_STATE_ACTIVE: |
697 | debug_object_deactivate(obj, &descr_type_test); |
698 | debug_object_init(obj, &descr_type_test); |
699 | return 1; |
700 | default: |
701 | return 0; |
702 | } |
703 | } |
704 | |
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/* Untracked object: decide via the marker field whether
		 * it is a legal statically initialized object */
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			/*
			 * Real code should return 0 here ! This is
			 * not a fixup of some bad behaviour. We
			 * merily call the debug_init function to keep
			 * track of the object.
			 */
			return 1;
		} else {
			/* Real code needs to emit a warning here */
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		/* Double activation: deactivate and activate again */
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}
740 | |
741 | /* |
742 | * fixup_destroy is called when: |
743 | * - an active object is destroyed |
744 | */ |
745 | static int __init fixup_destroy(void *addr, enum debug_obj_state state) |
746 | { |
747 | struct self_test *obj = addr; |
748 | |
749 | switch (state) { |
750 | case ODEBUG_STATE_ACTIVE: |
751 | debug_object_deactivate(obj, &descr_type_test); |
752 | debug_object_destroy(obj, &descr_type_test); |
753 | return 1; |
754 | default: |
755 | return 0; |
756 | } |
757 | } |
758 | |
759 | /* |
760 | * fixup_free is called when: |
761 | * - an active object is freed |
762 | */ |
763 | static int __init fixup_free(void *addr, enum debug_obj_state state) |
764 | { |
765 | struct self_test *obj = addr; |
766 | |
767 | switch (state) { |
768 | case ODEBUG_STATE_ACTIVE: |
769 | debug_object_deactivate(obj, &descr_type_test); |
770 | debug_object_free(obj, &descr_type_test); |
771 | return 1; |
772 | default: |
773 | return 0; |
774 | } |
775 | } |
776 | |
/*
 * Verify a selftest step: the tracker for @addr must be in @state and
 * the global fixup/warning counters must match @fixups/@warnings.
 * Returns 0 on success; on mismatch it warns, disables the debugger
 * and returns -EINVAL.
 */
static int
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	/* No tracker is only legal when we expect STATE_NONE */
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}
816 | |
/* Object type descriptor used exclusively by the selftest */
static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};
824 | |
/* Test object; static_init is flipped later to exercise the
 * "statically initialized object" path of fixup_activate() */
static __initdata struct self_test obj = { .static_init = 0 };

/*
 * Walk a test object through all legal and several illegal state
 * transitions and verify tracker state and fixup/warning accounting
 * after each step. Any failure disables the debugger (in
 * check_results()).
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	/* Keep interrupts off so nothing else can touch the global
	 * fixup/warning counters while we compare them. */
	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	/* Legal lifecycle: init -> activate */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	/* Double activation must warn and be fixed up */
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	/* Operations on a destroyed object must only warn */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Activation of an untracked, statically initialized object */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	/* Freeing memory which still holds an active object */
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	/* Restore the counters so the selftest does not skew the stats */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test  = NULL;

	local_irq_restore(flags);
}
897 | #else |
898 | static inline void debug_objects_selftest(void) { } |
899 | #endif |
900 | |
/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the poll list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	/* Seed the free pool with the static, __initdata objects */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
916 | |
/*
 * Convert the statically allocated objects to dynamic ones:
 * pre-allocate a full pool from the slab cache, swap it in for the
 * __initdata pool and copy any trackers already live in the hash
 * buckets over to dynamic objects. Returns 0 on success, -ENOMEM if
 * the pre-allocation failed (no state was changed in that case).
 */
static int debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	/* Allocate everything up front so failure is side-effect free */
	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	local_irq_enable();
	return 0;
free:
	/* Undo the partial pre-allocation */
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}
973 | |
/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents that the debug code is called on kmem_cache_free() for the
 * debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	/* On any failure fall back to disabled; the static pool objects
	 * are __initdata and must not be used past boot. */
	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		printk(KERN_WARNING "ODEBUG: out of memory.\n");
	} else
		debug_objects_selftest();
}
997 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9