/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS 14
#define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE 512
#define ODEBUG_POOL_MIN_LEVEL 256

#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
        struct hlist_head list;
        raw_spinlock_t lock;
};

static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int obj_pool_min_free = ODEBUG_POOL_SIZE;
static int obj_pool_free = ODEBUG_POOL_SIZE;
static int obj_pool_used;
static int obj_pool_max_used;
static struct kmem_cache *obj_cache;

static int debug_objects_maxchain __read_mostly;
static int debug_objects_fixups __read_mostly;
static int debug_objects_warnings __read_mostly;
static int debug_objects_enabled __read_mostly
        = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static struct debug_obj_descr *descr_test __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
        debug_objects_enabled = 1;
        return 0;
}

static int __init disable_object_debug(char *str)
{
        debug_objects_enabled = 0;
        return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
        [ODEBUG_STATE_NONE] = "none",
        [ODEBUG_STATE_INIT] = "initialized",
        [ODEBUG_STATE_INACTIVE] = "inactive",
        [ODEBUG_STATE_ACTIVE] = "active",
        [ODEBUG_STATE_DESTROYED] = "destroyed",
        [ODEBUG_STATE_NOTAVAILABLE] = "not available",
};

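/*
 * Refill the object pool from the slab cache. A note on the GFP flags
 * below: fill_pool() can be reached from contexts that must not sleep,
 * so the allocation is atomic, does not retry hard and stays silent on
 * failure; the callers cope with a pool that remains below the fill
 * level.
 */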
static int fill_pool(void)
{
        gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
        struct debug_obj *new;
        unsigned long flags;

        if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
                return obj_pool_free;

        if (unlikely(!obj_cache))
                return obj_pool_free;

        while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

                new = kmem_cache_zalloc(obj_cache, gfp);
                if (!new)
                        return obj_pool_free;

                raw_spin_lock_irqsave(&pool_lock, flags);
                hlist_add_head(&new->node, &obj_pool);
                obj_pool_free++;
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }
        return obj_pool_free;
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
        struct hlist_node *node;
        struct debug_obj *obj;
        int cnt = 0;

        hlist_for_each_entry(obj, node, &b->list, node) {
                cnt++;
                if (obj->object == addr)
                        return obj;
        }
        if (cnt > debug_objects_maxchain)
                debug_objects_maxchain = cnt;

        return NULL;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
        struct debug_obj *obj = NULL;

        raw_spin_lock(&pool_lock);
        if (obj_pool.first) {
                obj = hlist_entry(obj_pool.first, typeof(*obj), node);

                obj->object = addr;
                obj->descr = descr;
                obj->state = ODEBUG_STATE_NONE;
                obj->astate = 0;
                hlist_del(&obj->node);

                hlist_add_head(&obj->node, &b->list);

                obj_pool_used++;
                if (obj_pool_used > obj_pool_max_used)
                        obj_pool_max_used = obj_pool_used;

                obj_pool_free--;
                if (obj_pool_free < obj_pool_min_free)
                        obj_pool_min_free = obj_pool_free;
        }
        raw_spin_unlock(&pool_lock);

        return obj;
}

/*
 * workqueue function to free objects.
 */
static void free_obj_work(struct work_struct *work)
{
        struct debug_obj *obj;
        unsigned long flags;

        raw_spin_lock_irqsave(&pool_lock, flags);
        while (obj_pool_free > ODEBUG_POOL_SIZE) {
                obj = hlist_entry(obj_pool.first, typeof(*obj), node);
                hlist_del(&obj->node);
                obj_pool_free--;
                /*
                 * We release pool_lock across kmem_cache_free() to
                 * avoid contention on pool_lock.
                 */
                raw_spin_unlock_irqrestore(&pool_lock, flags);
                kmem_cache_free(obj_cache, obj);
                raw_spin_lock_irqsave(&pool_lock, flags);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
        unsigned long flags;
        int sched = 0;

        raw_spin_lock_irqsave(&pool_lock, flags);
        /*
         * schedule work when the pool is filled and the cache is
         * initialized:
         */
        if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
                sched = keventd_up() && !work_pending(&debug_obj_work);
        hlist_add_head(&obj->node, &obj_pool);
        obj_pool_free++;
        obj_pool_used--;
        raw_spin_unlock_irqrestore(&pool_lock, flags);
        if (sched)
                schedule_work(&debug_obj_work);
}

/*
 * We ran out of memory. That probably means there are tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *node, *tmp;
        HLIST_HEAD(freelist);
        struct debug_obj *obj;
        unsigned long flags;
        int i;

        printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_move_list(&db->list, &freelist);
                raw_spin_unlock_irqrestore(&db->lock, flags);

                /* Now free them */
                hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
                        hlist_del(&obj->node);
                        free_object(obj);
                }
        }
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
        unsigned long hash;

        hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
        return &obj_hash[hash];
}
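
/*
 * Example (assuming 4k chunks, i.e. PAGE_SHIFT == 12; the addresses are
 * illustrative): objects at 0xffff880012345678 and 0xffff880012345abc
 * share the chunk number 0xffff880012345 and therefore hash to the same
 * bucket. This is what lets the free-range check further below scan
 * only the buckets covering the freed memory area.
 */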

static void debug_print_object(struct debug_obj *obj, char *msg)
{
        struct debug_obj_descr *descr = obj->descr;
        static int limit;

        if (limit < 5 && descr != descr_test) {
                void *hint = descr->debug_hint ?
                        descr->debug_hint(obj->object) : NULL;
                limit++;
                WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
                                 "object type: %s hint: %pS\n",
                        msg, obj_states[obj->state], obj->astate,
                        descr->name, hint);
        }
        debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static int
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
                   void * addr, enum debug_obj_state state)
{
        int fixed = 0;

        if (fixup)
                fixed = fixup(addr, state);
        debug_objects_fixups += fixed;
        return fixed;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
        int is_on_stack;
        static int limit;

        if (limit > 4)
                return;

        is_on_stack = object_is_on_stack(addr);
        if (is_on_stack == onstack)
                return;

        limit++;
        if (is_on_stack)
                printk(KERN_WARNING
                       "ODEBUG: object is on stack, but not annotated\n");
        else
                printk(KERN_WARNING
                       "ODEBUG: object is not on stack, but annotated\n");
        WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj) {
                obj = alloc_object(addr, db, descr);
                if (!obj) {
                        debug_objects_enabled = 0;
                        raw_spin_unlock_irqrestore(&db->lock, flags);
                        debug_objects_oom();
                        return;
                }
                debug_object_is_on_stack(addr, onstack);
        }

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_INIT;
                break;

        case ODEBUG_STATE_ACTIVE:
                debug_print_object(obj, "init");
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_object_fixup(descr->fixup_init, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                debug_print_object(obj, "init");
                break;
        default:
                break;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 0);
}

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *                              initialized
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 1);
}
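
/*
 * Typical usage, sketched for a hypothetical "struct my_obj" (the names
 * here are illustrative, not an API defined by this file):
 *
 *	static struct debug_obj_descr my_obj_debug_descr = {
 *		.name = "my_obj",
 *	};
 *
 *	void my_obj_setup(struct my_obj *o)
 *	{
 *		debug_object_init(o, &my_obj_debug_descr);
 *	}
 *
 * Objects living on the stack must be announced with
 * debug_object_init_on_stack() instead, otherwise the stack check in
 * debug_object_is_on_stack() warns about the missing annotation.
 */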

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        struct debug_obj o = { .object = addr,
                               .state = ODEBUG_STATE_NOTAVAILABLE,
                               .descr = descr };

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                        obj->state = ODEBUG_STATE_ACTIVE;
                        break;

                case ODEBUG_STATE_ACTIVE:
                        debug_print_object(obj, "activate");
                        state = obj->state;
                        raw_spin_unlock_irqrestore(&db->lock, flags);
                        debug_object_fixup(descr->fixup_activate, addr, state);
                        return;

                case ODEBUG_STATE_DESTROYED:
                        debug_print_object(obj, "activate");
                        break;
                default:
                        break;
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);
                return;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        /*
         * This happens when a static object is activated. We let the
         * type specific code decide whether this really is a static
         * object.
         */
        if (debug_object_fixup(descr->fixup_activate, addr,
                               ODEBUG_STATE_NOTAVAILABLE))
                debug_print_object(&o, "activate");
}

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                case ODEBUG_STATE_ACTIVE:
                        if (!obj->astate)
                                obj->state = ODEBUG_STATE_INACTIVE;
                        else
                                debug_print_object(obj, "deactivate");
                        break;

                case ODEBUG_STATE_DESTROYED:
                        debug_print_object(obj, "deactivate");
                        break;
                default:
                        break;
                }
        } else {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "deactivate");
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_DESTROYED;
                break;
        case ODEBUG_STATE_ACTIVE:
                debug_print_object(obj, "destroy");
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_object_fixup(descr->fixup_destroy, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                debug_print_object(obj, "destroy");
                break;
        default:
                break;
        }
out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_free - debug checks when an object is freed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_ACTIVE:
                debug_print_object(obj, "free");
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_object_fixup(descr->fixup_free, addr, state);
                return;
        default:
                hlist_del(&obj->node);
                raw_spin_unlock_irqrestore(&db->lock, flags);
                free_object(obj);
                return;
        }
out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
}
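
/*
 * The calls above drive the tracked state machine. A sketch of the
 * expected sequence for a well-behaved (hypothetical) object "o":
 *
 *	debug_object_init(o, &descr);		NONE/INACTIVE -> INIT
 *	debug_object_activate(o, &descr);	INIT/INACTIVE -> ACTIVE
 *	debug_object_deactivate(o, &descr);	ACTIVE -> INACTIVE
 *	debug_object_free(o, &descr);		tracker entry dropped
 *
 * Any out-of-order call, e.g. freeing an ACTIVE object, emits a warning
 * via debug_print_object() and gives the owner a chance to repair the
 * situation through its fixup callbacks.
 */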

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                raw_spin_unlock_irqrestore(&db->lock, flags);
                /*
                 * Maybe the object is static. Let the type specific
                 * code decide what to do.
                 */
                if (debug_object_fixup(descr->fixup_assert_init, addr,
                                       ODEBUG_STATE_NOTAVAILABLE))
                        debug_print_object(&o, "assert_init");
                return;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr: address of the object
 * @descr: pointer to an object specific debug description structure
 * @expect: expected state
 * @next: state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
                          unsigned int expect, unsigned int next)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_ACTIVE:
                        if (obj->astate == expect)
                                obj->astate = next;
                        else
                                debug_print_object(obj, "active_state");
                        break;

                default:
                        debug_print_object(obj, "active_state");
                        break;
                }
        } else {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "active_state");
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
}
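
/*
 * Users which subdivide ACTIVE via obj->astate can enforce ordering of
 * those sub-states with debug_object_active_state(). An illustrative
 * sketch (the state constants are made up for this example):
 *
 *	debug_object_active_state(obj, &descr, STATE_READY, STATE_QUEUED);
 *
 * advances astate from STATE_READY to STATE_QUEUED and warns if the
 * object was not ACTIVE or astate held any other value.
 */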

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
        unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
        struct hlist_node *node, *tmp;
        HLIST_HEAD(freelist);
        struct debug_obj_descr *descr;
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        int cnt;

        saddr = (unsigned long) address;
        eaddr = saddr + size;
        paddr = saddr & ODEBUG_CHUNK_MASK;
        chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
        chunks >>= ODEBUG_CHUNK_SHIFT;
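
        /*
         * Worked example, assuming 4k chunks: freeing 0x64 bytes at
         * 0x...1ff0 yields eaddr = 0x...2054 and paddr = 0x...1000, so
         * chunks = 2 and both hash buckets covering the freed range are
         * scanned.
         */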

        for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
                db = get_bucket(paddr);

repeat:
                cnt = 0;
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
                        cnt++;
                        oaddr = (unsigned long) obj->object;
                        if (oaddr < saddr || oaddr >= eaddr)
                                continue;

                        switch (obj->state) {
                        case ODEBUG_STATE_ACTIVE:
                                debug_print_object(obj, "free");
                                descr = obj->descr;
                                state = obj->state;
                                raw_spin_unlock_irqrestore(&db->lock, flags);
                                debug_object_fixup(descr->fixup_free,
                                                   (void *) oaddr, state);
                                goto repeat;
                        default:
                                hlist_del(&obj->node);
                                hlist_add_head(&obj->node, &freelist);
                                break;
                        }
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);

                /* Now free them */
                hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
                        hlist_del(&obj->node);
                        free_object(obj);
                }

                if (cnt > debug_objects_maxchain)
                        debug_objects_maxchain = cnt;
        }
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
        if (debug_objects_enabled)
                __debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
        seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
        seq_printf(m, "warnings :%d\n", debug_objects_warnings);
        seq_printf(m, "fixups :%d\n", debug_objects_fixups);
        seq_printf(m, "pool_free :%d\n", obj_pool_free);
        seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
        seq_printf(m, "pool_used :%d\n", obj_pool_used);
        seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
        return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
        .open = debug_stats_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init debug_objects_init_debugfs(void)
{
        struct dentry *dbgdir, *dbgstats;

        if (!debug_objects_enabled)
                return 0;

        dbgdir = debugfs_create_dir("debug_objects", NULL);
        if (!dbgdir)
                return -ENOMEM;

        dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
                                       &debug_stats_fops);
        if (!dbgstats)
                goto err;

        return 0;

err:
        debugfs_remove(dbgdir);

        return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
        unsigned long dummy1[6];
        int static_init;
        unsigned long dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_init(obj, &descr_type_test);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                if (obj->static_init == 1) {
                        debug_object_init(obj, &descr_type_test);
                        debug_object_activate(obj, &descr_type_test);
                        return 0;
                }
                return 1;

        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_activate(obj, &descr_type_test);
                return 1;

        default:
                return 0;
        }
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_destroy(obj, &descr_type_test);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_free(obj, &descr_type_test);
                return 1;
        default:
                return 0;
        }
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int res = -EINVAL;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj && state != ODEBUG_STATE_NONE) {
                WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
                goto out;
        }
        if (obj && obj->state != state) {
                WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
                     obj->state, state);
                goto out;
        }
        if (fixups != debug_objects_fixups) {
                WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
                     fixups, debug_objects_fixups);
                goto out;
        }
        if (warnings != debug_objects_warnings) {
                WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
                     warnings, debug_objects_warnings);
                goto out;
        }
        res = 0;
out:
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (res)
                debug_objects_enabled = 0;
        return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
        .name = "selftest",
        .fixup_init = fixup_init,
        .fixup_activate = fixup_activate,
        .fixup_destroy = fixup_destroy,
        .fixup_free = fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
        int fixups, oldfixups, warnings, oldwarnings;
        unsigned long flags;

        local_irq_save(flags);

        fixups = oldfixups = debug_objects_fixups;
        warnings = oldwarnings = debug_objects_warnings;
        descr_test = &descr_type_test;

        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
                goto out;
        debug_object_destroy(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

        obj.static_init = 1;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        __debug_check_no_obj_freed(&obj, sizeof(obj));
        if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
                goto out;
#endif
        printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
        debug_objects_fixups = oldfixups;
        debug_objects_warnings = oldwarnings;
        descr_test = NULL;

        local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
        int i;

        for (i = 0; i < ODEBUG_HASH_SIZE; i++)
                raw_spin_lock_init(&obj_hash[i].lock);

        for (i = 0; i < ODEBUG_POOL_SIZE; i++)
                hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
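
/*
 * Ordering note: in init/main.c, debug_objects_early_init() is meant to
 * run early in start_kernel(), before any allocator is available, which
 * is why it only touches the static pool above. debug_objects_mem_init()
 * below runs later, once the slab allocator is functional.
 */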

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *node, *tmp;
        struct debug_obj *obj, *new;
        HLIST_HEAD(objects);
        int i, cnt = 0;

        for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
                obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
                if (!obj)
                        goto free;
                hlist_add_head(&obj->node, &objects);
        }

        /*
         * When debug_objects_mem_init() is called we know that only
         * one CPU is up, so disabling interrupts is enough
         * protection. This avoids the lockdep hell of lock ordering.
         */
        local_irq_disable();

        /* Remove the statically allocated objects from the pool */
        hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
                hlist_del(&obj->node);
        /* Move the allocated objects to the pool */
        hlist_move_list(&objects, &obj_pool);

        /* Replace the active object references */
        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                hlist_move_list(&db->list, &objects);

                hlist_for_each_entry(obj, node, &objects, node) {
                        new = hlist_entry(obj_pool.first, typeof(*obj), node);
                        hlist_del(&new->node);
                        /* copy object data */
                        *new = *obj;
                        hlist_add_head(&new->node, &db->list);
                        cnt++;
                }
        }

        printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
               obj_pool_used);
        local_irq_enable();
        return 0;
free:
        hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
        return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects themselves, avoiding recursion.
 */
void __init debug_objects_mem_init(void)
{
        if (!debug_objects_enabled)
                return;

        obj_cache = kmem_cache_create("debug_objects_cache",
                                      sizeof (struct debug_obj), 0,
                                      SLAB_DEBUG_OBJECTS, NULL);

        if (!obj_cache || debug_objects_replace_static_objects()) {
                debug_objects_enabled = 0;
                if (obj_cache)
                        kmem_cache_destroy(obj_cache);
                printk(KERN_WARNING "ODEBUG: out of memory.\n");
        } else
                debug_objects_selftest();
}