
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a priority search tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
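
/*
 * Editor's note: an illustrative sketch (not in the original file) of the
 * reference-counting rules above, as followed by helpers further down such
 * as paint_ptr(). Error handling and the flags declaration are elided:
 *
 *	struct kmemleak_object *object;
 *
 *	object = find_and_get_object(ptr, 0);	(takes rcu_read_lock() itself)
 *	if (object) {
 *		spin_lock_irqsave(&object->lock, flags);
 *		... read or update the metadata, e.g. object->count ...
 *		spin_unlock_irqrestore(&object->lock, flags);
 *		put_object(object);	(may schedule the RCU freeing)
 *	}
 */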

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)
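
/*
 * Editor's note: gfp_kmemleak_mask() keeps only the GFP_KERNEL/GFP_ATOMIC
 * bits of the caller's flags and adds __GFP_NORETRY | __GFP_NOMEMALLOC |
 * __GFP_NOWARN, so kmemleak's own metadata allocations fail fast and
 * quietly rather than retrying or dipping into emergency reserves while
 * tracking someone else's allocation.
 */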

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;
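
/*
 * Editor's note on the life cycle implied by the code below: before
 * kmemleak_init() runs, the kmemleak_*() callbacks only append an entry to
 * early_log[] via log_early(); kmemleak_init() then replays the buffer,
 * re-issuing each recorded operation through the normal code paths, so
 * allocations made before initialization are not lost.
 */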

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warning(x);			\
	dump_stack();			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Print the object's hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	int i, len, remaining;
	unsigned char linebuf[HEX_ROW_SIZE * 5];

	/* limit the number of lines to HEX_MAX_LINES */
	remaining = len =
		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));

	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
	for (i = 0; i < len; i += HEX_ROW_SIZE) {
		int linelen = min(remaining, HEX_ROW_SIZE);

		remaining -= HEX_ROW_SIZE;
		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
				   HEX_ASCII);
		seq_printf(seq, "    %s\n", linebuf);
	}
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
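
/*
 * Editor's example (derived from the rules above): an object registered with
 * min_count == 1 starts each scan whitened (count == 0); the first scanned
 * pointer to it raises count to min_count and turns it gray. An object
 * registered via kmemleak_ignore() gets min_count == KMEMLEAK_BLACK and is
 * neither scanned nor reported.
 */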

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted, and they are old enough to avoid false positives caused
 * by pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->tree_node.start, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			pr_warning("Found object by alias at 0x%08lx\n", ptr);
			dump_stack();
			dump_object_info(object);
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reaches 0, the RCU freeing has already been
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warning("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel does not yet have the pointer to the
	 * memory block to be able to free it. However, we still hold the
	 * kmemleak_lock here in case parts of the kernel started freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
			      "(already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock(&object->lock);
		dump_object_info(object);
		spin_unlock(&object->lock);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	write_lock_irqsave(&kmemleak_lock, flags);
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 2);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_get_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
			      "(size %zu)\n", ptr, size);
#endif
		return;
	}
	__delete_object(object);

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	put_object(object);
}
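
/*
 * Editor's example for the split above: partially freeing [ptr, ptr + size)
 * out of a registered block [start, end) deletes the original object and,
 * where the ranges do not line up, re-creates metadata for the surviving
 * head [start, ptr) and/or tail [ptr + size, end).
 */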

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object "
			      "at 0x%08lx as %s\n", ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warning("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak, but references
 * to it are still searched for.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		pr_warning("Early log buffer exceeded, "
			   "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	if (op_type == KMEMLEAK_ALLOC)
		log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
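
/*
 * Editor's sketch (hypothetical driver code, not from this file): blocks
 * obtained outside the hooked allocators can be registered by hand:
 *
 *	buf = (void *)__get_free_pages(GFP_KERNEL, order);
 *	if (buf)
 *		kmemleak_alloc(buf, PAGE_SIZE << order, 1, GFP_KERNEL);
 *
 * and must be unregistered with kmemleak_free(buf) before free_pages().
 */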

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
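
/*
 * Editor's sketch (hypothetical): if the only reference to an object is one
 * the scanner cannot follow, e.g. a physical address programmed into a
 * device register, mark it to silence the false positive:
 *
 *	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
 *	writel(virt_to_phys(desc), base + DESC_BASE_REG);
 *	kmemleak_not_leak(desc);
 *
 * (desc, base and DESC_BASE_REG are made-up names.)
 */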

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
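
/*
 * Editor's sketch (hypothetical struct): when only part of an object can
 * hold pointers, restrict the scan to that part:
 *
 *	struct mixed {
 *		char data[512];		(raw payload, never holds pointers)
 *		struct list_head list;	(the only pointer-bearing member)
 *	};
 *	struct mixed *m = kmalloc(sizeof(*m), GFP_KERNEL);
 *	kmemleak_scan_area(&m->list, sizeof(m->list), GFP_KERNEL);
 */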

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	object->checksum = crc32(0, (void *)object->pointer, object->size);
	return object->checksum != old_csum;
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned, int allow_resched)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long flags;
		unsigned long pointer;

		if (allow_resched)
			cond_resched();
		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		pointer = *ptr;

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object)) {
			list_add_tail(&object->gray_list, &gray_list);
			spin_unlock_irqrestore(&object->lock, flags);
			continue;
		}

		spin_unlock_irqrestore(&object->lock, flags);
		put_object(object);
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);

		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
		       !(object->flags & OBJECT_NO_SCAN)) {
			scan_block(start, min(start + MAX_SCAN_SIZE, end),
				   object, 0);
			start += MAX_SCAN_SIZE;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		}
	} else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object, 0);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL, 1);
	scan_block(__bss_start, __bss_stop, NULL, 1);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
#endif

	/*
	 * Struct page scanning for each node. The code below is not yet safe
	 * with MEMORY_HOTPLUG.
	 */
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL, 1);
		}
	}

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			scan_block(task_stack_page(p), task_stack_page(p) +
				   THREAD_SIZE, NULL, 0);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks)
		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);

}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);

	list_for_each_continue_rcu(n, &object_list) {
		struct kmemleak_object *obj =
			list_entry(n, struct kmemleak_object, object_list);
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	return seq_open(file, &kmemleak_seq_ops);
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	return seq_release(inode, file);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	addr = simple_strtoul(str, NULL, 0);
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the tasks stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = strict_strtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "clear", 5) == 0)
		kmemleak_clear();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}
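
/*
 * Editor's example of driving the interface above from a shell (assuming
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	# echo scan > /sys/kernel/debug/kmemleak	(trigger a scan now)
 *	# echo scan=300 > /sys/kernel/debug/kmemleak	(rescan every 300s)
 *	# cat /sys/kernel/debug/kmemleak		(read the leak report)
 *	# echo clear > /sys/kernel/debug/kmemleak	(grey out current reports)
 */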

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};

/*
 * Perform the freeing of the kmemleak internal objects after waiting for any
 * current memory scan to complete.
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	struct kmemleak_object *object;

	mutex_lock(&scan_mutex);
	stop_scan_thread();

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_early_log, 0);
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		schedule_work(&cleanup_work);

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	if (!atomic_read(&kmemleak_error)) {
		atomic_set(&kmemleak_enabled, 1);
		atomic_set(&kmemleak_early_log, 0);
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			WARN_ON(1);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);