/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a priority search tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   serializes users of the "kmemleak" debugfs file and modifications to the
 *   memory scanning parameters, including the scan_thread pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
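
/*
 * A minimal sketch of the reference counting pattern described above
 * (illustrative, not part of kmemleak itself): any traversal of the
 * object_list must pin each object with get_object() under rcu_read_lock()
 * and drop it with put_object() when done.
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(object, &object_list, object_list) {
 *		if (!get_object(object))
 *			continue;	(object already being freed)
 *		... use object ...
 *		put_object(object);
 *	}
 *	rcu_read_unlock();
 */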

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};
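
/*
 * A rough illustration of the early logging flow described above (the
 * numbers are made up): a boot-time kmalloc() that happens before
 * kmemleak_init() ends up recorded rather than tracked directly,
 *
 *	kmemleak_alloc(ptr, 32, 1, GFP_KERNEL)
 *		-> kmemleak_enabled == 0, kmemleak_early_log == 1
 *		-> log_early(KMEMLEAK_ALLOC, ptr, 32, 1)
 *
 * and is replayed later from kmemleak_init() via early_alloc(), which
 * creates the real kmemleak_object with the saved stack trace.
 */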

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warning(x);			\
	dump_stack();			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Printing of the objects hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	int i, len, remaining;
	unsigned char linebuf[HEX_ROW_SIZE * 5];

	/* limit the number of lines to HEX_MAX_LINES */
	remaining = len =
		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));

	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
	for (i = 0; i < len; i += HEX_ROW_SIZE) {
		int linelen = min(remaining, HEX_ROW_SIZE);

		remaining -= HEX_ROW_SIZE;
		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
				   HEX_ASCII);
		seq_printf(seq, "    %s\n", linebuf);
	}
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *           sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *           (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
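
/*
 * A few worked examples of the color encoding above (illustrative):
 * - count = 0, min_count = 1: white (orphan candidate, no references found)
 * - count = 2, min_count = 1: gray (enough references, will be scanned)
 * - count = 0, min_count = 0: gray (marked as a false positive)
 * - min_count = -1 (KMEMLEAK_BLACK): black (never reported, never scanned)
 */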

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->tree_node.start, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reaches 0, the RCU freeing has already been
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}
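
/*
 * Typical usage of the helper above, as seen throughout this file (sketch
 * only): every successful find_and_get_object() must be paired with a
 * put_object() once the metadata is no longer needed.
 *
 *	object = find_and_get_object(ptr, 0);
 *	if (!object)
 *		return;
 *	spin_lock_irqsave(&object->lock, flags);
 *	... inspect or modify the metadata ...
 *	spin_unlock_irqrestore(&object->lock, flags);
 *	put_object(object);
 */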

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;

	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!object) {
		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel does not yet have the pointer to the
	 * memory block to be able to free it. However, we still hold the
	 * kmemleak_lock here in case parts of the kernel started freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
			      "(already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock(&object->lock);
		dump_object_info(object);
		spin_unlock(&object->lock);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	write_lock_irqsave(&kmemleak_lock, flags);
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 2);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_get_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
			      "(size %zu)\n", ptr, size);
#endif
		return;
	}
	__delete_object(object);

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	put_object(object);
}
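
/*
 * A worked example of the split above (made-up addresses): for an object
 * covering [0x1000, 0x1100) with a partial free of 0x20 bytes at 0x1040,
 * delete_object_part() deletes the original object and re-creates two
 * objects for the surviving ranges:
 *
 *	create_object(0x1000, 0x40, ...);	start..ptr
 *	create_object(0x1060, 0xa0, ...);	ptr+size..end
 */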

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object "
			      "at 0x%08lx as %s\n", ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently gray so that it is no longer reported as a
 * leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black so that it is ignored during scanning and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!area) {
		kmemleak_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak, but references
 * to it are still searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		pr_warning("Early log buffer exceeded, "
			   "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	if (op_type == KMEMLEAK_ALLOC)
		log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Memory allocation function callback. This function is called from the
 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
 * vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
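
/*
 * A sketch of how an allocator is expected to call the hook above (not code
 * from any particular allocator): min_count is 1 for ordinary allocations,
 * so a block becomes a leak candidate when fewer than one pointer to it is
 * found during scanning.
 *
 *	void *my_alloc(size_t size, gfp_t flags)
 *	{
 *		void *ptr = low_level_alloc(size, flags);
 *		kmemleak_alloc(ptr, size, 1, flags);
 *		return ptr;
 *	}
 *
 * my_alloc() and low_level_alloc() are placeholder names for the example.
 */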

/*
 * Memory freeing function callback. This function is called from the kernel
 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/*
 * Partial memory freeing function callback. This function is usually called
 * from bootmem allocator when (part of) a memory block is freed.
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/*
 * Mark an already allocated memory block as a false positive. This will cause
 * the block to no longer be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
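
/*
 * Illustrative caller (hypothetical driver code, not part of this file): an
 * object whose only reference is stashed somewhere kmemleak cannot see, e.g.
 * a physical address programmed into a hardware register, would otherwise be
 * reported as a leak.
 *
 *	buf = kmalloc(BUF_SIZE, GFP_KERNEL);
 *	writel(virt_to_phys(buf), dev_regs + DMA_ADDR);
 *	kmemleak_not_leak(buf);
 *
 * BUF_SIZE, dev_regs and DMA_ADDR are made-up names for the example.
 */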

/*
 * Ignore a memory block. This is usually done when it is known that the
 * corresponding block is not a leak and does not contain any references to
 * other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/*
 * Limit the range to be scanned in an allocated memory block.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
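
/*
 * Illustrative caller (hypothetical, for the API above): restrict scanning
 * of a large buffer to the small header that actually holds pointers. This
 * reduces scanning time and avoids false negatives caused by stale payload
 * data that merely looks like pointers.
 *
 *	struct big_buf {
 *		struct list_head list;	(the only pointers in the block)
 *		char data[PAGE_SIZE];	(bulk payload, no pointers)
 *	} *b = kmalloc(sizeof(*b), GFP_KERNEL);
 *
 *	kmemleak_scan_area(&b->list, sizeof(b->list), GFP_KERNEL);
 */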

/*
 * Inform kmemleak not to scan the given memory block.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	object->checksum = crc32(0, (void *)object->pointer, object->size);
	return object->checksum != old_csum;
}
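
/*
 * Why the checksum matters (summary of the scanning logic below): an object
 * that stays white at the end of a scan is only reported once its contents
 * stop changing between scans. A changing CRC means the block is still being
 * written to, so it is re-queued for one more gray-list pass instead of
 * being reported, which avoids flagging objects that are merely in flux.
 */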

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned, int allow_resched)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long flags;
		unsigned long pointer;

		if (allow_resched)
			cond_resched();
		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		pointer = *ptr;

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object)) {
			list_add_tail(&object->gray_list, &gray_list);
			spin_unlock_irqrestore(&object->lock, flags);
			continue;
		}

		spin_unlock_irqrestore(&object->lock, flags);
		put_object(object);
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);

		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
		       !(object->flags & OBJECT_NO_SCAN)) {
			scan_block(start, min(start + MAX_SCAN_SIZE, end),
				   object, 0);
			start += MAX_SCAN_SIZE;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		}
	} else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object, 0);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object structures */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL, 1);
	scan_block(__bss_start, __bss_stop, NULL, 1);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
#endif

	/*
	 * Struct page scanning for each node. The code below is not yet safe
	 * with MEMORY_HOTPLUG.
	 */
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL, 1);
		}
	}

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			scan_block(task_stack_page(p), task_stack_page(p) +
				   THREAD_SIZE, NULL, 0);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks)
		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. Note that reading
 * the debugfs file does not itself trigger a memory scan.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);

	list_for_each_continue_rcu(n, &object_list) {
		next_obj = list_entry(n, struct kmemleak_object, object_list);
		if (get_object(next_obj))
			break;
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	return seq_open(file, &kmemleak_seq_ops);
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	return seq_release(inode, file);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	addr = simple_strtoul(str, NULL, 0);
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = strict_strtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "clear", 5) == 0)
		kmemleak_clear();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};
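
/*
 * Example session (root shell), exercising the commands documented above:
 *
 *	mount -t debugfs none /sys/kernel/debug
 *	echo scan > /sys/kernel/debug/kmemleak		(trigger a scan now)
 *	cat /sys/kernel/debug/kmemleak			(list suspected leaks)
 *	echo clear > /sys/kernel/debug/kmemleak		(ignore current reports)
 *	echo scan=300 > /sys/kernel/debug/kmemleak	(scan every 300 seconds)
 */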

/*
 * Perform the freeing of the kmemleak internal objects after waiting for any
 * current memory scan to complete.
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	struct kmemleak_object *object;

	mutex_lock(&scan_mutex);
	stop_scan_thread();

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_early_log, 0);
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		schedule_work(&cleanup_work);

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") != 0)
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
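
/*
 * Example usage (kernel command line): booting with
 *
 *	kmemleak=off
 *
 * disables kmemleak before any tracking starts; "kmemleak=on" (the default)
 * leaves it enabled. Any other value is rejected with -EINVAL.
 */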

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	if (!atomic_read(&kmemleak_error)) {
		atomic_set(&kmemleak_enabled, 1);
		atomic_set(&kmemleak_early_log, 0);
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			WARN_ON(1);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);