/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 * accesses to the object_tree_root. The object_list is the main list
 * holding the metadata (struct kmemleak_object) for the allocated memory
 * blocks. The object_tree_root is a priority search tree used to look-up
 * metadata based on a pointer to the corresponding memory block. The
 * kmemleak_object structures are added to the object_list and
 * object_tree_root in the create_object() function called from the
 * kmemleak_alloc() callback and removed in delete_object() called from the
 * kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 * the metadata (e.g. count) are protected by this lock. Note that some
 * members of this structure may be protected by other means (atomic or
 * kmemleak_lock). This lock is also held when scanning the corresponding
 * memory block to avoid the kernel freeing it via the kmemleak_free()
 * callback. This is less heavyweight than holding a global lock like
 * kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 * unreferenced objects at a time. The gray_list contains the objects which
 * are already referenced or marked as false positives and need to be
 * scanned. This list is only modified during a scanning episode when the
 * scan_mutex is held. At the end of a scan, the gray_list is always empty.
 * Note that the kmemleak_object.use_count is incremented when an object is
 * added to the gray_list and therefore cannot be freed. This mutex also
 * prevents multiple users of the "kmemleak" debugfs file together with
 * modifications to the memory scanning parameters including the scan_thread
 * pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
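
/*
 * Illustrative sketch (not part of the kmemleak code itself): the reference
 * counting pattern described above, as used by the helpers further down in
 * this file. A kmemleak_object is pinned via its use_count before use and
 * released afterwards, which may trigger the delayed RCU freeing:
 *
 *	struct kmemleak_object *object = find_and_get_object(ptr, 0);
 *
 *	if (object) {
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&object->lock, flags);
 *		... inspect or update the metadata ...
 *		spin_unlock_irqrestore(&object->lock, flags);
 *		put_object(object);
 *	}
 */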

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE 16 /* stack trace length */
#define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
#define SECS_FIRST_SCAN 60 /* delay before the first scan */
#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
#define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */

#define BYTES_PER_POINTER sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
                 __GFP_NORETRY | __GFP_NOMEMALLOC | \
                 __GFP_NOWARN)
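
/*
 * For example (illustration only), gfp_kmemleak_mask(GFP_KERNEL |
 * __GFP_HIGHMEM) keeps only the GFP_KERNEL bits of the caller's flags and
 * yields GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, so
 * kmemleak's internal allocations fail early and quietly rather than put
 * additional pressure on the system.
 */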

/* scanning area inside a memory block */
struct kmemleak_scan_area {
    struct hlist_node node;
    unsigned long start;
    size_t size;
};

#define KMEMLEAK_GREY 0
#define KMEMLEAK_BLACK -1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
    spinlock_t lock;
    unsigned long flags; /* object status flags */
    struct list_head object_list;
    struct list_head gray_list;
    struct prio_tree_node tree_node;
    struct rcu_head rcu; /* object_list lockless traversal */
    /* object usage count; object freed when use_count == 0 */
    atomic_t use_count;
    unsigned long pointer;
    size_t size;
    /* minimum number of pointers found before it is considered a leak */
    int min_count;
    /* the total number of pointers found pointing to this object */
    int count;
    /* checksum for detecting modified objects */
    u32 checksum;
    /* memory ranges to be scanned inside an object (empty for all) */
    struct hlist_head area_list;
    unsigned long trace[MAX_TRACE];
    unsigned int trace_len;
    unsigned long jiffies; /* creation timestamp */
    pid_t pid; /* pid of the current task */
    char comm[TASK_COMM_LEN]; /* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED (1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED (1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN (1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE 16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE 1
/* include ASCII after the hex output */
#define HEX_ASCII 1
/* max number of lines to be printed */
#define HEX_MAX_LINES 2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a kmemleak warning was issued */
static atomic_t kmemleak_warning = ATOMIC_INIT(0);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;


/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */
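
/*
 * Illustrative flow (sketch): a boot-time allocation that happens before
 * kmemleak is fully up is recorded instead of being tracked directly:
 *
 *	kmemleak_alloc(ptr, size, 1, gfp);
 *		kmemleak_enabled is still 0 and kmemleak_early_log is 1,
 *		so this ends up in log_early(KMEMLEAK_ALLOC, ptr, size, 1);
 *
 * Later, kmemleak_init() (not part of this excerpt) is assumed to walk the
 * early_log buffer and replay each entry, e.g. KMEMLEAK_ALLOC entries via
 * early_alloc() below, so that boot-time blocks get proper metadata.
 */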

/* kmemleak operation type for early logging */
enum {
    KMEMLEAK_ALLOC,
    KMEMLEAK_ALLOC_PERCPU,
    KMEMLEAK_FREE,
    KMEMLEAK_FREE_PART,
    KMEMLEAK_FREE_PERCPU,
    KMEMLEAK_NOT_LEAK,
    KMEMLEAK_IGNORE,
    KMEMLEAK_SCAN_AREA,
    KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
    int op_type; /* kmemleak operation type */
    const void *ptr; /* allocated/freed memory block */
    size_t size; /* memory block size */
    int min_count; /* minimum reference count */
    unsigned long trace[MAX_TRACE]; /* stack trace */
    unsigned int trace_len; /* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
    early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...) do { \
    pr_warning(x); \
    dump_stack(); \
    atomic_set(&kmemleak_warning, 1); \
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...) do { \
    kmemleak_warn(x); \
    kmemleak_disable(); \
} while (0)

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
                struct kmemleak_object *object)
{
    const u8 *ptr = (const u8 *)object->pointer;
    int i, len, remaining;
    unsigned char linebuf[HEX_ROW_SIZE * 5];

    /* limit the number of lines to HEX_MAX_LINES */
    remaining = len =
        min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));

    seq_printf(seq, " hex dump (first %d bytes):\n", len);
    for (i = 0; i < len; i += HEX_ROW_SIZE) {
        int linelen = min(remaining, HEX_ROW_SIZE);

        remaining -= HEX_ROW_SIZE;
        hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
                   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
                   HEX_ASCII);
        seq_printf(seq, " %s\n", linebuf);
    }
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray - not orphan, not marked as false positive (min_count == 0) or
 * sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 * (min_count == -1). No function defined for this color.
 * Newly created objects start with object->count == 0; objects registered
 * with a positive min_count are therefore white until a memory scan finds
 * enough references to them.
 */
static bool color_white(const struct kmemleak_object *object)
{
    return object->count != KMEMLEAK_BLACK &&
        object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
    return object->min_count != KMEMLEAK_BLACK &&
        object->count >= object->min_count;
}
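
/*
 * Concrete examples of the encoding above: a block registered with
 * min_count == 1 and no references found yet (count == 0) is white, i.e. a
 * leak candidate; an object marked via kmemleak_not_leak() has
 * min_count == KMEMLEAK_GREY (0) and is therefore always gray; an object
 * marked via kmemleak_ignore() has min_count == KMEMLEAK_BLACK (-1), so
 * neither color_white() nor color_gray() matches it.
 */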

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
    return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
        time_before_eq(object->jiffies + jiffies_min_age,
                   jiffies_last_scan);
}

/*
 * Printing of the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
                   struct kmemleak_object *object)
{
    int i;
    unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

    seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
           object->pointer, object->size);
    seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
           object->comm, object->pid, object->jiffies,
           msecs_age / 1000, msecs_age % 1000);
    hex_dump_object(seq, object);
    seq_printf(seq, " backtrace:\n");

    for (i = 0; i < object->trace_len; i++) {
        void *ptr = (void *)object->trace[i];
        seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
    }
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
    struct stack_trace trace;

    trace.nr_entries = object->trace_len;
    trace.entries = object->trace;

    pr_notice("Object 0x%08lx (size %zu):\n",
          object->tree_node.start, object->size);
    pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
          object->comm, object->pid, object->jiffies);
    pr_notice(" min_count = %d\n", object->min_count);
    pr_notice(" count = %d\n", object->count);
    pr_notice(" flags = 0x%lx\n", object->flags);
    pr_notice(" checksum = %u\n", object->checksum);
    pr_notice(" backtrace:\n");
    print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the priority search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
    struct prio_tree_node *node;
    struct prio_tree_iter iter;
    struct kmemleak_object *object;

    prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
    node = prio_tree_next(&iter);
    if (node) {
        object = prio_tree_entry(node, struct kmemleak_object,
                     tree_node);
        if (!alias && object->pointer != ptr) {
            kmemleak_warn("Found object by alias at 0x%08lx\n",
                      ptr);
            dump_object_info(object);
            object = NULL;
        }
    } else
        object = NULL;

    return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
    return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
    struct hlist_node *elem, *tmp;
    struct kmemleak_scan_area *area;
    struct kmemleak_object *object =
        container_of(rcu, struct kmemleak_object, rcu);

    /*
     * Once use_count is 0 (guaranteed by put_object), there is no other
     * code accessing this object, hence no need for locking.
     */
    hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
        hlist_del(elem);
        kmem_cache_free(scan_area_cache, area);
    }
    kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
    if (!atomic_dec_and_test(&object->use_count))
        return;

    /* should only get here after delete_object was called */
    WARN_ON(object->flags & OBJECT_ALLOCATED);

    call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
    unsigned long flags;
    struct kmemleak_object *object = NULL;

    rcu_read_lock();
    read_lock_irqsave(&kmemleak_lock, flags);
    if (ptr >= min_addr && ptr < max_addr)
        object = lookup_object(ptr, alias);
    read_unlock_irqrestore(&kmemleak_lock, flags);

    /* check whether the object is still available */
    if (object && !get_object(object))
        object = NULL;
    rcu_read_unlock();

    return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
    struct stack_trace stack_trace;

    stack_trace.max_entries = MAX_TRACE;
    stack_trace.nr_entries = 0;
    stack_trace.entries = trace;
    stack_trace.skip = 2;
    save_stack_trace(&stack_trace);

    return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
                         int min_count, gfp_t gfp)
{
    unsigned long flags;
    struct kmemleak_object *object;
    struct prio_tree_node *node;

    object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
    if (!object) {
        pr_warning("Cannot allocate a kmemleak_object structure\n");
        kmemleak_disable();
        return NULL;
    }

    INIT_LIST_HEAD(&object->object_list);
    INIT_LIST_HEAD(&object->gray_list);
    INIT_HLIST_HEAD(&object->area_list);
    spin_lock_init(&object->lock);
    atomic_set(&object->use_count, 1);
    object->flags = OBJECT_ALLOCATED;
    object->pointer = ptr;
    object->size = size;
    object->min_count = min_count;
    object->count = 0; /* white color initially */
    object->jiffies = jiffies;
    object->checksum = 0;

    /* task information */
    if (in_irq()) {
        object->pid = 0;
        strncpy(object->comm, "hardirq", sizeof(object->comm));
    } else if (in_softirq()) {
        object->pid = 0;
        strncpy(object->comm, "softirq", sizeof(object->comm));
    } else {
        object->pid = current->pid;
        /*
         * There is a small chance of a race with set_task_comm(),
         * however using get_task_comm() here may cause locking
         * dependency issues with current->alloc_lock. In the worst
         * case, the command line is not correct.
         */
        strncpy(object->comm, current->comm, sizeof(object->comm));
    }

    /* kernel backtrace */
    object->trace_len = __save_stack_trace(object->trace);

    INIT_PRIO_TREE_NODE(&object->tree_node);
    object->tree_node.start = ptr;
    object->tree_node.last = ptr + size - 1;

    write_lock_irqsave(&kmemleak_lock, flags);

    min_addr = min(min_addr, ptr);
    max_addr = max(max_addr, ptr + size);
    node = prio_tree_insert(&object_tree_root, &object->tree_node);
    /*
     * The code calling the kernel does not yet have the pointer to the
     * memory block to be able to free it. However, we still hold the
     * kmemleak_lock here in case parts of the kernel started freeing
     * random memory blocks.
     */
    if (node != &object->tree_node) {
        kmemleak_stop("Cannot insert 0x%lx into the object search tree "
                  "(already existing)\n", ptr);
        object = lookup_object(ptr, 1);
        spin_lock(&object->lock);
        dump_object_info(object);
        spin_unlock(&object->lock);

        goto out;
    }
    list_add_tail_rcu(&object->object_list, &object_list);
out:
    write_unlock_irqrestore(&kmemleak_lock, flags);
    return object;
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void __delete_object(struct kmemleak_object *object)
{
    unsigned long flags;

    write_lock_irqsave(&kmemleak_lock, flags);
    prio_tree_remove(&object_tree_root, &object->tree_node);
    list_del_rcu(&object->object_list);
    write_unlock_irqrestore(&kmemleak_lock, flags);

    WARN_ON(!(object->flags & OBJECT_ALLOCATED));
    WARN_ON(atomic_read(&object->use_count) < 2);

    /*
     * Locking here also ensures that the corresponding memory block
     * cannot be freed when it is being scanned.
     */
    spin_lock_irqsave(&object->lock, flags);
    object->flags &= ~OBJECT_ALLOCATED;
    spin_unlock_irqrestore(&object->lock, flags);
    put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
    struct kmemleak_object *object;

    object = find_and_get_object(ptr, 0);
    if (!object) {
#ifdef DEBUG
        kmemleak_warn("Freeing unknown object at 0x%08lx\n",
                  ptr);
#endif
        return;
    }
    __delete_object(object);
    put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
    struct kmemleak_object *object;
    unsigned long start, end;

    object = find_and_get_object(ptr, 1);
    if (!object) {
#ifdef DEBUG
        kmemleak_warn("Partially freeing unknown object at 0x%08lx "
                  "(size %zu)\n", ptr, size);
#endif
        return;
    }
    __delete_object(object);

    /*
     * Create one or two objects that may result from the memory block
     * split. Note that partial freeing is only done by free_bootmem() and
     * this happens before kmemleak_init() is called. The path below is
     * only executed during early log recording in kmemleak_init(), so
     * GFP_KERNEL is enough.
     */
    start = object->pointer;
    end = object->pointer + object->size;
    if (ptr > start)
        create_object(start, ptr - start, object->min_count,
                  GFP_KERNEL);
    if (ptr + size < end)
        create_object(ptr + size, end - ptr - size, object->min_count,
                  GFP_KERNEL);

    put_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
    object->min_count = color;
    if (color == KMEMLEAK_BLACK)
        object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
    unsigned long flags;

    spin_lock_irqsave(&object->lock, flags);
    __paint_it(object, color);
    spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
    struct kmemleak_object *object;

    object = find_and_get_object(ptr, 0);
    if (!object) {
        kmemleak_warn("Trying to color unknown object "
                  "at 0x%08lx as %s\n", ptr,
                  (color == KMEMLEAK_GREY) ? "Grey" :
                  (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
        return;
    }
    paint_it(object, color);
    put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
    paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
    paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
    unsigned long flags;
    struct kmemleak_object *object;
    struct kmemleak_scan_area *area;

    object = find_and_get_object(ptr, 1);
    if (!object) {
        kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
                  ptr);
        return;
    }

    area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
    if (!area) {
        pr_warning("Cannot allocate a scan area\n");
        goto out;
    }

    spin_lock_irqsave(&object->lock, flags);
    if (ptr + size > object->pointer + object->size) {
        kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
        dump_object_info(object);
        kmem_cache_free(scan_area_cache, area);
        goto out_unlock;
    }

    INIT_HLIST_NODE(&area->node);
    area->start = ptr;
    area->size = size;

    hlist_add_head(&area->node, &object->area_list);
out_unlock:
    spin_unlock_irqrestore(&object->lock, flags);
out:
    put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
    unsigned long flags;
    struct kmemleak_object *object;

    object = find_and_get_object(ptr, 0);
    if (!object) {
        kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
        return;
    }

    spin_lock_irqsave(&object->lock, flags);
    object->flags |= OBJECT_NO_SCAN;
    spin_unlock_irqrestore(&object->lock, flags);
    put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
                 int min_count)
{
    unsigned long flags;
    struct early_log *log;

    if (atomic_read(&kmemleak_error)) {
        /* kmemleak stopped recording, just count the requests */
        crt_early_log++;
        return;
    }

    if (crt_early_log >= ARRAY_SIZE(early_log)) {
        kmemleak_disable();
        return;
    }

    /*
     * There is no need for locking since the kernel is still in UP mode
     * at this stage. Disabling the IRQs is enough.
     */
    local_irq_save(flags);
    log = &early_log[crt_early_log];
    log->op_type = op_type;
    log->ptr = ptr;
    log->size = size;
    log->min_count = min_count;
    log->trace_len = __save_stack_trace(log->trace);
    crt_early_log++;
    local_irq_restore(flags);
}

/*
 * Register an early allocated block recorded in the early_log buffer and
 * populate its stack trace from the logged entry.
 */
static void early_alloc(struct early_log *log)
{
    struct kmemleak_object *object;
    unsigned long flags;
    int i;

    if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
        return;

    /*
     * RCU locking needed to ensure object is not freed via put_object().
     */
    rcu_read_lock();
    object = create_object((unsigned long)log->ptr, log->size,
                   log->min_count, GFP_ATOMIC);
    if (!object)
        goto out;
    spin_lock_irqsave(&object->lock, flags);
    for (i = 0; i < log->trace_len; i++)
        object->trace[i] = log->trace[i];
    object->trace_len = log->trace_len;
    spin_unlock_irqrestore(&object->lock, flags);
out:
    rcu_read_unlock();
}

/*
 * Register an early allocated __percpu block by registering the per-CPU area
 * of each possible CPU.
 */
static void early_alloc_percpu(struct early_log *log)
{
    unsigned int cpu;
    const void __percpu *ptr = log->ptr;

    for_each_possible_cpu(cpu) {
        log->ptr = per_cpu_ptr(ptr, cpu);
        early_alloc(log);
    }
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr: pointer to beginning of the object
 * @size: size of the object
 * @min_count: minimum number of references to this object. If during memory
 * scanning a number of references less than @min_count is found,
 * the object is reported as a memory leak. If @min_count is 0,
 * the object is never reported as a leak. If @min_count is -1,
 * the object is ignored (not scanned and not reported as a leak)
 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
              gfp_t gfp)
{
    pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

    if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
        create_object((unsigned long)ptr, size, min_count, gfp);
    else if (atomic_read(&kmemleak_early_log))
        log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
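
/*
 * Usage sketch (hypothetical allocator hook, for illustration only): a
 * custom allocator that wants its blocks tracked would pair the callbacks
 * the same way the slab allocators do. raw_pool_alloc()/raw_pool_free() are
 * made-up helpers standing in for the real allocation work:
 *
 *	void *pool_alloc(size_t size, gfp_t gfp)
 *	{
 *		void *ptr = raw_pool_alloc(size, gfp);
 *
 *		if (ptr)
 *			kmemleak_alloc(ptr, size, 1, gfp);
 *		return ptr;
 *	}
 *
 *	void pool_free(void *ptr)
 *	{
 *		kmemleak_free(ptr);
 *		raw_pool_free(ptr);
 *	}
 *
 * min_count == 1 means one pointer to the block is expected to remain
 * reachable; if none is found during a scan, the block is reported.
 */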

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr: __percpu pointer to beginning of the object
 * @size: size of the object
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
 * allocation.
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
{
    unsigned int cpu;

    pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

    /*
     * Percpu allocations are only scanned and not reported as leaks
     * (min_count is set to 0).
     */
    if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
        for_each_possible_cpu(cpu)
            create_object((unsigned long)per_cpu_ptr(ptr, cpu),
                      size, 0, GFP_KERNEL);
    else if (atomic_read(&kmemleak_early_log))
        log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr: pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
    pr_debug("%s(0x%p)\n", __func__, ptr);

    if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
        delete_object_full((unsigned long)ptr);
    else if (atomic_read(&kmemleak_early_log))
        log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr: pointer to the beginning or inside the object. This also
 * represents the start of the range to be freed
 * @size: size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
    pr_debug("%s(0x%p)\n", __func__, ptr);

    if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
        delete_object_part((unsigned long)ptr, size);
    else if (atomic_read(&kmemleak_early_log))
        log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr: __percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
    unsigned int cpu;

    pr_debug("%s(0x%p)\n", __func__, ptr);

    if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
        for_each_possible_cpu(cpu)
            delete_object_full((unsigned long)per_cpu_ptr(ptr,
                                      cpu));
    else if (atomic_read(&kmemleak_early_log))
        log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr: pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
    pr_debug("%s(0x%p)\n", __func__, ptr);

    if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
        make_gray_object((unsigned long)ptr);
    else if (atomic_read(&kmemleak_early_log))
        log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr: pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
    pr_debug("%s(0x%p)\n", __func__, ptr);

    if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
        make_black_object((unsigned long)ptr);
    else if (atomic_read(&kmemleak_early_log))
        log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr: pointer to beginning or inside the object. This also
 * represents the start of the scan area
 * @size: size of the scan area
 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
    pr_debug("%s(0x%p)\n", __func__, ptr);

    if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr))
        add_scan_area((unsigned long)ptr, size, gfp);
    else if (atomic_read(&kmemleak_early_log))
        log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr: pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
    pr_debug("%s(0x%p)\n", __func__, ptr);

    if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
        object_no_scan((unsigned long)ptr);
    else if (atomic_read(&kmemleak_early_log))
        log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
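
/*
 * Annotation sketch (hypothetical driver code, for illustration only; obj,
 * buf and obj->hw_ring are made-up names): the annotations above are
 * normally applied right after the allocation they qualify:
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	kmemleak_not_leak(obj);
 *		the only reference is programmed into hardware, so do not
 *		report the block, but keep scanning it
 *
 *	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *	kmemleak_no_scan(buf);
 *		plain data buffer, guaranteed to contain no pointers
 *
 *	kmemleak_scan_area(&obj->hw_ring, sizeof(obj->hw_ring), GFP_KERNEL);
 *		only the hw_ring member may hold references to other
 *		allocated blocks
 */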

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
    u32 old_csum = object->checksum;

    if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
        return false;

    object->checksum = crc32(0, (void *)object->pointer, object->size);
    return object->checksum != old_csum;
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
    if (!atomic_read(&kmemleak_enabled))
        return 1;

    /*
     * This function may be called from either process or kthread context,
     * hence the need to check for both stop conditions.
     */
    if (current->mm)
        return signal_pending(current);
    else
        return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
               struct kmemleak_object *scanned, int allow_resched)
{
    unsigned long *ptr;
    unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
    unsigned long *end = _end - (BYTES_PER_POINTER - 1);

    for (ptr = start; ptr < end; ptr++) {
        struct kmemleak_object *object;
        unsigned long flags;
        unsigned long pointer;

        if (allow_resched)
            cond_resched();
        if (scan_should_stop())
            break;

        /* don't scan uninitialized memory */
        if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
                          BYTES_PER_POINTER))
            continue;

        pointer = *ptr;

        object = find_and_get_object(pointer, 1);
        if (!object)
            continue;
        if (object == scanned) {
            /* self referenced, ignore */
            put_object(object);
            continue;
        }

        /*
         * Avoid the lockdep recursive warning on object->lock being
         * previously acquired in scan_object(). These locks are
         * enclosed by scan_mutex.
         */
        spin_lock_irqsave_nested(&object->lock, flags,
                     SINGLE_DEPTH_NESTING);
        if (!color_white(object)) {
            /* non-orphan, ignored or new */
            spin_unlock_irqrestore(&object->lock, flags);
            put_object(object);
            continue;
        }

        /*
         * Increase the object's reference count (number of pointers
         * to the memory block). If this count reaches the required
         * minimum, the object's color will become gray and it will be
         * added to the gray_list.
         */
        object->count++;
        if (color_gray(object)) {
            list_add_tail(&object->gray_list, &gray_list);
            spin_unlock_irqrestore(&object->lock, flags);
            continue;
        }

        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
    }
}

/*
 * Scan a memory block corresponding to a kmemleak_object. The caller must
 * ensure that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
    struct kmemleak_scan_area *area;
    struct hlist_node *elem;
    unsigned long flags;

    /*
     * Once the object->lock is acquired, the corresponding memory block
     * cannot be freed (the same lock is acquired in delete_object).
     */
    spin_lock_irqsave(&object->lock, flags);
    if (object->flags & OBJECT_NO_SCAN)
        goto out;
    if (!(object->flags & OBJECT_ALLOCATED))
        /* already freed object */
        goto out;
    if (hlist_empty(&object->area_list)) {
        void *start = (void *)object->pointer;
        void *end = (void *)(object->pointer + object->size);

        while (start < end && (object->flags & OBJECT_ALLOCATED) &&
               !(object->flags & OBJECT_NO_SCAN)) {
            scan_block(start, min(start + MAX_SCAN_SIZE, end),
                   object, 0);
            start += MAX_SCAN_SIZE;

            spin_unlock_irqrestore(&object->lock, flags);
            cond_resched();
            spin_lock_irqsave(&object->lock, flags);
        }
    } else
        hlist_for_each_entry(area, elem, &object->area_list, node)
            scan_block((void *)area->start,
                   (void *)(area->start + area->size),
                   object, 0);
out:
    spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
    struct kmemleak_object *object, *tmp;

    /*
     * The list traversal is safe for both tail additions and removals
     * from inside the loop. The kmemleak objects cannot be freed from
     * outside the loop because their use_count was incremented.
     */
    object = list_entry(gray_list.next, typeof(*object), gray_list);
    while (&object->gray_list != &gray_list) {
        cond_resched();

        /* may add new objects to the list */
        if (!scan_should_stop())
            scan_object(object);

        tmp = list_entry(object->gray_list.next, typeof(*object),
                 gray_list);

        /* remove the object from the list and release it */
        list_del(&object->gray_list);
        put_object(object);

        object = tmp;
    }
    WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
    unsigned long flags;
    struct kmemleak_object *object;
    int i;
    int new_leaks = 0;

    jiffies_last_scan = jiffies;

    /* prepare the kmemleak_object structures */
    rcu_read_lock();
    list_for_each_entry_rcu(object, &object_list, object_list) {
        spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
        /*
         * With a few exceptions there should be a maximum of
         * 1 reference to any object at this point.
         */
        if (atomic_read(&object->use_count) > 1) {
            pr_debug("object->use_count = %d\n",
                 atomic_read(&object->use_count));
            dump_object_info(object);
        }
#endif
        /* reset the reference count (whiten the object) */
        object->count = 0;
        if (color_gray(object) && get_object(object))
            list_add_tail(&object->gray_list, &gray_list);

        spin_unlock_irqrestore(&object->lock, flags);
    }
    rcu_read_unlock();

    /* data/bss scanning */
    scan_block(_sdata, _edata, NULL, 1);
    scan_block(__bss_start, __bss_stop, NULL, 1);

#ifdef CONFIG_SMP
    /* per-cpu sections scanning */
    for_each_possible_cpu(i)
        scan_block(__per_cpu_start + per_cpu_offset(i),
               __per_cpu_end + per_cpu_offset(i), NULL, 1);
#endif

    /*
     * Struct page scanning for each node.
     */
    lock_memory_hotplug();
    for_each_online_node(i) {
        pg_data_t *pgdat = NODE_DATA(i);
        unsigned long start_pfn = pgdat->node_start_pfn;
        unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
            struct page *page;

            if (!pfn_valid(pfn))
                continue;
            page = pfn_to_page(pfn);
            /* only scan if page is in use */
            if (page_count(page) == 0)
                continue;
            scan_block(page, page + 1, NULL, 1);
        }
    }
    unlock_memory_hotplug();

    /*
     * Scanning the task stacks (may introduce false negatives).
     */
    if (kmemleak_stack_scan) {
        struct task_struct *p, *g;

        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
            scan_block(task_stack_page(p), task_stack_page(p) +
                   THREAD_SIZE, NULL, 0);
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);
    }

    /*
     * Scan the objects already referenced from the sections scanned
     * above.
     */
    scan_gray_list();

    /*
     * Check for new or unreferenced objects modified since the previous
     * scan and color them gray until the next scan.
     */
    rcu_read_lock();
    list_for_each_entry_rcu(object, &object_list, object_list) {
        spin_lock_irqsave(&object->lock, flags);
        if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
            && update_checksum(object) && get_object(object)) {
            /* color it gray temporarily */
            object->count = object->min_count;
            list_add_tail(&object->gray_list, &gray_list);
        }
        spin_unlock_irqrestore(&object->lock, flags);
    }
    rcu_read_unlock();

    /*
     * Re-scan the gray list for modified unreferenced objects.
     */
    scan_gray_list();

    /*
     * If scanning was stopped do not report any new unreferenced objects.
     */
    if (scan_should_stop())
        return;

    /*
     * Scanning result reporting.
     */
    rcu_read_lock();
    list_for_each_entry_rcu(object, &object_list, object_list) {
        spin_lock_irqsave(&object->lock, flags);
        if (unreferenced_object(object) &&
            !(object->flags & OBJECT_REPORTED)) {
            object->flags |= OBJECT_REPORTED;
            new_leaks++;
        }
        spin_unlock_irqrestore(&object->lock, flags);
    }
    rcu_read_unlock();

    if (new_leaks)
        pr_info("%d new suspected memory leaks (see "
            "/sys/kernel/debug/kmemleak)\n", new_leaks);
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
    static int first_run = 1;

    pr_info("Automatic memory scanning thread started\n");
    set_user_nice(current, 10);

    /*
     * Wait before the first scan to allow the system to fully initialize.
     */
    if (first_run) {
        first_run = 0;
        ssleep(SECS_FIRST_SCAN);
    }

    while (!kthread_should_stop()) {
        signed long timeout = jiffies_scan_wait;

        mutex_lock(&scan_mutex);
        kmemleak_scan();
        mutex_unlock(&scan_mutex);

        /* wait before the next scan */
        while (timeout && !kthread_should_stop())
            timeout = schedule_timeout_interruptible(timeout);
    }

    pr_info("Automatic memory scanning thread ended\n");

    return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
    if (scan_thread)
        return;
    scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
    if (IS_ERR(scan_thread)) {
        pr_warning("Failed to create the scan thread\n");
        scan_thread = NULL;
    }
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
    if (scan_thread) {
        kthread_stop(scan_thread);
        scan_thread = NULL;
    }
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
    struct kmemleak_object *object;
    loff_t n = *pos;
    int err;

    err = mutex_lock_interruptible(&scan_mutex);
    if (err < 0)
        return ERR_PTR(err);

    rcu_read_lock();
    list_for_each_entry_rcu(object, &object_list, object_list) {
        if (n-- > 0)
            continue;
        if (get_object(object))
            goto out;
    }
    object = NULL;
out:
    return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    struct kmemleak_object *prev_obj = v;
    struct kmemleak_object *next_obj = NULL;
    struct list_head *n = &prev_obj->object_list;

    ++(*pos);

    list_for_each_continue_rcu(n, &object_list) {
        struct kmemleak_object *obj =
            list_entry(n, struct kmemleak_object, object_list);
        if (get_object(obj)) {
            next_obj = obj;
            break;
        }
    }

    put_object(prev_obj);
    return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
    if (!IS_ERR(v)) {
        /*
         * kmemleak_seq_start may return ERR_PTR if the scan_mutex
         * waiting was interrupted, so only release it if !IS_ERR.
         */
        rcu_read_unlock();
        mutex_unlock(&scan_mutex);
        if (v)
            put_object(v);
    }
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
    struct kmemleak_object *object = v;
    unsigned long flags;

    spin_lock_irqsave(&object->lock, flags);
    if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
        print_unreferenced(seq, object);
    spin_unlock_irqrestore(&object->lock, flags);
    return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
    .start = kmemleak_seq_start,
    .next = kmemleak_seq_next,
    .stop = kmemleak_seq_stop,
    .show = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
    return seq_open(file, &kmemleak_seq_ops);
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
    return seq_release(inode, file);
}

static int dump_str_object_info(const char *str)
{
    unsigned long flags;
    struct kmemleak_object *object;
    unsigned long addr;

    addr = simple_strtoul(str, NULL, 0);
    object = find_and_get_object(addr, 0);
    if (!object) {
        pr_info("Unknown object at 0x%08lx\n", addr);
        return -EINVAL;
    }

    spin_lock_irqsave(&object->lock, flags);
    dump_object_info(object);
    spin_unlock_irqrestore(&object->lock, flags);

    put_object(object);
    return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
    struct kmemleak_object *object;
    unsigned long flags;

    rcu_read_lock();
    list_for_each_entry_rcu(object, &object_list, object_list) {
        spin_lock_irqsave(&object->lock, flags);
        if ((object->flags & OBJECT_REPORTED) &&
            unreferenced_object(object))
            __paint_it(object, KMEMLEAK_GREY);
        spin_unlock_irqrestore(&object->lock, flags);
    }
    rcu_read_unlock();
}
1594
1595/*
1596 * File write operation to configure kmemleak at run-time. The following
1597 * commands can be written to the /sys/kernel/debug/kmemleak file:
1598 * off - disable kmemleak (irreversible)
1599 * stack=on - enable the task stacks scanning
1600 * stack=off - disable the tasks stacks scanning
1601 * scan=on - start the automatic memory scanning thread
1602 * scan=off - stop the automatic memory scanning thread
1603 * scan=... - set the automatic memory scanning period in seconds (0 to
1604 * disable it)
1605 * scan - trigger a memory scan
1606 * clear - mark all current reported unreferenced kmemleak objects as
1607 * grey to ignore printing them
1608 * dump=... - dump information about the object found at the given address
1609 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
                              size_t size, loff_t *ppos)
{
    char buf[64];
    int buf_size;
    int ret;

    if (!atomic_read(&kmemleak_enabled))
        return -EBUSY;

    buf_size = min(size, (sizeof(buf) - 1));
    if (strncpy_from_user(buf, user_buf, buf_size) < 0)
        return -EFAULT;
    buf[buf_size] = 0;

    ret = mutex_lock_interruptible(&scan_mutex);
    if (ret < 0)
        return ret;

    if (strncmp(buf, "off", 3) == 0)
        kmemleak_disable();
    else if (strncmp(buf, "stack=on", 8) == 0)
        kmemleak_stack_scan = 1;
    else if (strncmp(buf, "stack=off", 9) == 0)
        kmemleak_stack_scan = 0;
    else if (strncmp(buf, "scan=on", 7) == 0)
        start_scan_thread();
    else if (strncmp(buf, "scan=off", 8) == 0)
        stop_scan_thread();
    else if (strncmp(buf, "scan=", 5) == 0) {
        unsigned long secs;

        ret = strict_strtoul(buf + 5, 0, &secs);
        if (ret < 0)
            goto out;
        stop_scan_thread();
        if (secs) {
            jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
            start_scan_thread();
        }
    } else if (strncmp(buf, "scan", 4) == 0)
        kmemleak_scan();
    else if (strncmp(buf, "clear", 5) == 0)
        kmemleak_clear();
    else if (strncmp(buf, "dump=", 5) == 0)
        ret = dump_str_object_info(buf + 5);
    else
        ret = -EINVAL;

out:
    mutex_unlock(&scan_mutex);
    if (ret < 0)
        return ret;

    /* ignore the rest of the buffer, only one command at a time */
    *ppos += size;
    return size;
}
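
/*
 * Example invocations of the commands parsed above (a sketch, assuming
 * debugfs is mounted at /sys/kernel/debug; the dump address is made up):
 *
 *   echo scan=60 > /sys/kernel/debug/kmemleak     # scan every 60 seconds
 *   echo scan=0 > /sys/kernel/debug/kmemleak      # stop periodic scanning
 *   echo stack=off > /sys/kernel/debug/kmemleak   # don't scan task stacks
 *   echo dump=0xc90ba500 > /sys/kernel/debug/kmemleak  # one object's info
 *
 * Only one command is handled per write; any trailing bytes in the buffer
 * are consumed and ignored.
 */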

static const struct file_operations kmemleak_fops = {
    .owner = THIS_MODULE,
    .open = kmemleak_open,
    .read = seq_read,
    .write = kmemleak_write,
    .llseek = seq_lseek,
    .release = kmemleak_release,
};

/*
 * Stop the memory scanning thread and, if no scan thread was ever started,
 * free the kmemleak internal objects (otherwise kmemleak may still hold
 * useful information about memory leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
    struct kmemleak_object *object;
    bool cleanup = scan_thread == NULL;

    mutex_lock(&scan_mutex);
    stop_scan_thread();

    if (cleanup) {
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list)
            delete_object_full(object->pointer);
        rcu_read_unlock();
    }
    mutex_unlock(&scan_mutex);
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
    /* atomically check whether it was already invoked */
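    /*
     * Note: atomic_cmpxchg() returns the previous value of kmemleak_error,
     * so exactly one caller observes 0 and proceeds with the disable; any
     * concurrent or repeated call returns early.
     */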
    if (atomic_cmpxchg(&kmemleak_error, 0, 1))
        return;

    /* stop any memory operation tracing */
    atomic_set(&kmemleak_enabled, 0);

    /* check whether it is too early for a kernel thread */
    if (atomic_read(&kmemleak_initialized))
        schedule_work(&cleanup_work);

    pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
    if (!str)
        return -EINVAL;
    if (strcmp(str, "off") == 0)
        kmemleak_disable();
    else if (strcmp(str, "on") == 0)
        kmemleak_skip_disable = 1;
    else
        return -EINVAL;
    return 0;
}
early_param("kmemleak", kmemleak_boot_config);
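
/*
 * Example (illustrative): booting with "kmemleak=off" on the kernel command
 * line disables the detector before any allocation is traced. When
 * CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y reverses the default, "kmemleak=on"
 * must be passed instead to keep kmemleak enabled (see the
 * kmemleak_skip_disable check in kmemleak_init() below).
 */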

static void __init print_log_trace(struct early_log *log)
{
    struct stack_trace trace;

    trace.nr_entries = log->trace_len;
    trace.entries = log->trace;

    pr_notice("Early log backtrace:\n");
    print_stack_trace(&trace, 2);
}

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
    int i;
    unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
    if (!kmemleak_skip_disable) {
        atomic_set(&kmemleak_early_log, 0);
        kmemleak_disable();
        return;
    }
#endif

    jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
    jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

    object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
    scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
    INIT_PRIO_TREE_ROOT(&object_tree_root);

    if (crt_early_log >= ARRAY_SIZE(early_log))
        pr_warning("Early log buffer exceeded (%d), please increase "
                   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);

    /* the kernel is still in UP mode, so disabling the IRQs is enough */
    local_irq_save(flags);
    atomic_set(&kmemleak_early_log, 0);
    if (atomic_read(&kmemleak_error)) {
        local_irq_restore(flags);
        return;
    } else
        atomic_set(&kmemleak_enabled, 1);
    local_irq_restore(flags);

    /*
     * This is the point where tracking allocations is safe. Automatic
     * scanning is started during the late initcall. Add the early logged
     * callbacks to the kmemleak infrastructure.
     */
    for (i = 0; i < crt_early_log; i++) {
        struct early_log *log = &early_log[i];

        switch (log->op_type) {
        case KMEMLEAK_ALLOC:
            early_alloc(log);
            break;
        case KMEMLEAK_ALLOC_PERCPU:
            early_alloc_percpu(log);
            break;
        case KMEMLEAK_FREE:
            kmemleak_free(log->ptr);
            break;
        case KMEMLEAK_FREE_PART:
            kmemleak_free_part(log->ptr, log->size);
            break;
        case KMEMLEAK_FREE_PERCPU:
            kmemleak_free_percpu(log->ptr);
            break;
        case KMEMLEAK_NOT_LEAK:
            kmemleak_not_leak(log->ptr);
            break;
        case KMEMLEAK_IGNORE:
            kmemleak_ignore(log->ptr);
            break;
        case KMEMLEAK_SCAN_AREA:
            kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
            break;
        case KMEMLEAK_NO_SCAN:
            kmemleak_no_scan(log->ptr);
            break;
        default:
            kmemleak_warn("Unknown early log operation: %d\n",
                          log->op_type);
        }

        if (atomic_read(&kmemleak_warning)) {
            print_log_trace(log);
            atomic_set(&kmemleak_warning, 0);
        }
    }
}
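
/*
 * Note on the early log replay above: before kmemleak_init() runs, the
 * kmemleak_*() callbacks cannot allocate metadata (object_cache does not
 * exist yet), so they only record the operation type, pointer and size in
 * the static early_log[] array. The loop above re-issues each recorded
 * operation once the infrastructure is ready, so that allocations made
 * during early boot are tracked as if kmemleak had been running from the
 * start.
 */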

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
    struct dentry *dentry;

    atomic_set(&kmemleak_initialized, 1);

    if (atomic_read(&kmemleak_error)) {
        /*
         * Some error occurred and kmemleak was disabled. There is a
         * small chance that kmemleak_disable() was called immediately
         * after setting kmemleak_initialized, in which case the
         * clean-up work may be scheduled twice, but the two instances
         * are serialized by scan_mutex.
         */
        schedule_work(&cleanup_work);
        return -ENOMEM;
    }

    dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
                                 &kmemleak_fops);
    if (!dentry)
        pr_warning("Failed to create the debugfs kmemleak file\n");
    mutex_lock(&scan_mutex);
    start_scan_thread();
    mutex_unlock(&scan_mutex);

    pr_info("Kernel memory leak detector initialized\n");

    return 0;
}
late_initcall(kmemleak_late_init);
