Source at commit cdde9cf73945d547acd3e96f9508c79e84ad0bf1 (Maarten ter Huurne, "MMC: JZ4740: Added support for CPU frequency changing").
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

/* Pool accounting, all protected by pool_lock */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

/* Statistics, exported via debugfs (see debug_stats_show() below) */
static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static struct debug_obj_descr	*descr_test __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return;

	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct hlist_node *node;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, node, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr = descr;
		obj->state = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}

/*
 * workqueue function to free objects.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = keventd_up() && !work_pending(&debug_obj_work);
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
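
/*
 * Example (an illustrative sketch, assuming ODEBUG_CHUNK_SHIFT == 12,
 * i.e. 4K pages): objects at 0x...5008 and 0x...5ff0 share chunk number
 * 0x...5 and therefore hash into the same bucket, while an object at
 * 0x...6010 falls into the next chunk's bucket. This per-chunk hashing
 * is what allows __debug_check_no_obj_freed() below to scan only the
 * buckets that cover a freed memory range.
 */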

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so that we have a better chance of getting
 * useful debug output.
 */
static int
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	int fixed = 0;

	if (fixup)
		fixed = fixup(addr, state);
	debug_objects_fixups += fixed;
	return fixed;
}
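
/*
 * Sketch of a fixup callback (hypothetical names, modelled on the
 * selftest fixups further down): a callback returns 1 when it repaired
 * the situation and 0 when it did not; the result is accumulated in
 * debug_objects_fixups.
 *
 *	static int foo_fixup_free(void *addr, enum debug_obj_state state)
 *	{
 *		struct foo *f = addr;
 *
 *		if (state == ODEBUG_STATE_ACTIVE) {
 *			foo_stop(f);	// hypothetical shutdown helper
 *			debug_object_free(f, &foo_debug_descr);
 *			return 1;
 *		}
 *		return 0;
 *	}
 */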

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
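
/*
 * Illustrative usage sketch (hypothetical subsystem "foo", not part of
 * this file): a user declares a descriptor and brackets the object's
 * lifetime with the tracking calls defined here.
 *
 *	static struct debug_obj_descr foo_debug_descr = {
 *		.name = "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *	}
 *
 *	void foo_start(struct foo *f)
 *	{
 *		debug_object_activate(f, &foo_debug_descr);
 *	}
 *
 * Objects living on the stack must be announced with
 * debug_object_init_on_stack() instead, so that
 * debug_object_is_on_stack() does not warn.
 */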

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We let the
	 * type specific code decide whether this really is a statically
	 * initialized object.
	 */
	if (debug_object_fixup(descr->fixup_activate, addr,
			       ODEBUG_STATE_NOTAVAILABLE))
		debug_print_object(&o, "activate");
}

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_assert_init - debug checks when an object should be
 *			      initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static. Let the type specific
		 * code decide what to do.
		 */
		if (debug_object_fixup(descr->fixup_assert_init, addr,
				       ODEBUG_STATE_NOTAVAILABLE))
			debug_print_object(&o, "assert_init");
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
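
/*
 * Illustrative sketch (hypothetical state values, not part of this
 * file): a user can encode sub-states of an active object in
 * obj->astate. With states OBJ_READY and OBJ_QUEUED, an enqueue path
 * might check:
 *
 *	debug_object_active_state(obj, &foo_debug_descr,
 *				  OBJ_READY, OBJ_QUEUED);
 *
 * The transition to @next only happens when the current astate matches
 * @expect; any mismatch is reported through debug_print_object(). Note
 * that debug_object_deactivate() above refuses to deactivate an object
 * whose astate is non-zero.
 */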

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;
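
	/*
	 * Worked example (a sketch, assuming 4K pages): freeing 8192
	 * bytes starting at offset 0x1800 into a page gives
	 * eaddr - paddr = 0x2800, so chunks = (0x2800 + 0xfff) >> 12 = 3.
	 * Three buckets are scanned even though only two pages' worth of
	 * memory was freed, because the range straddles three page-sized
	 * chunks.
	 */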

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);
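
/*
 * With CONFIG_DEBUG_FS enabled, the counters can be inspected at
 * runtime. Assuming debugfs is mounted at the usual /sys/kernel/debug,
 * the output looks roughly like this (values are illustrative):
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 *	max_chain     :42
 *	warnings      :0
 *	fixups        :0
 *	pool_free     :512
 *	pool_min_free :256
 *	pool_used     :13
 *	pool_max_used :97
 */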

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			return 0;
		}
		return 1;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, node, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}
	local_irq_enable();

	printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
	       obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects themselves, which would recurse.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		printk(KERN_WARNING "ODEBUG: out of memory.\n");
	} else
		debug_objects_selftest();
}
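
/*
 * Note on boot ordering (informational sketch): debug_objects_early_init()
 * is expected to run very early in start_kernel(), before the first
 * tracked object can be initialized, while debug_objects_mem_init() runs
 * once the slab allocator is functional, replacing the static pool and
 * running the optional selftest.
 */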