Root/kernel/lockdep.c

1/*
2 * kernel/lockdep.c
3 *
4 * Runtime locking correctness validator
5 *
6 * Started by Ingo Molnar:
7 *
8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
10 *
11 * this code maps all the lock dependencies as they occur in a live kernel
12 * and will warn about the following classes of locking bugs:
13 *
14 * - lock inversion scenarios
15 * - circular lock dependencies
16 * - hardirq/softirq safe/unsafe locking bugs
17 *
18 * Bugs are reported even if the current locking scenario does not cause
19 * any deadlock at this point.
20 *
21 * I.e. if at any time in the past two locks were taken in a different order,
22 * even if it happened for another task, even if those were different
23 * locks (but of the same class as this lock), this code will detect it.
24 *
25 * Thanks to Arjan van de Ven for coming up with the initial idea of
26 * mapping lock dependencies at runtime.
27 */
28#define DISABLE_BRANCH_PROFILING
29#include <linux/mutex.h>
30#include <linux/sched.h>
31#include <linux/delay.h>
32#include <linux/module.h>
33#include <linux/proc_fs.h>
34#include <linux/seq_file.h>
35#include <linux/spinlock.h>
36#include <linux/kallsyms.h>
37#include <linux/interrupt.h>
38#include <linux/stacktrace.h>
39#include <linux/debug_locks.h>
40#include <linux/irqflags.h>
41#include <linux/utsname.h>
42#include <linux/hash.h>
43#include <linux/ftrace.h>
44#include <linux/stringify.h>
45#include <linux/bitops.h>
46
47#include <asm/sections.h>
48
49#include "lockdep_internals.h"
50
51#define CREATE_TRACE_POINTS
52#include <trace/events/lockdep.h>
53
54#ifdef CONFIG_PROVE_LOCKING
55int prove_locking = 1;
56module_param(prove_locking, int, 0644);
57#else
58#define prove_locking 0
59#endif
60
61#ifdef CONFIG_LOCK_STAT
62int lock_stat = 1;
63module_param(lock_stat, int, 0644);
64#else
65#define lock_stat 0
66#endif
67
68/*
69 * lockdep_lock: protects the lockdep graph, the hashes and the
70 * class/list/hash allocators.
71 *
72 * This is one of the rare exceptions where it's justified
73 * to use a raw spinlock - we really don't want the spinlock
74 * code to recurse back into the lockdep code...
75 */
76static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
77
78static int graph_lock(void)
79{
80    __raw_spin_lock(&lockdep_lock);
81    /*
82     * Make sure that if another CPU detected a bug while
83     * walking the graph we don't change it (while the other
84     * CPU is busy printing out stuff with the graph lock
85     * dropped already)
86     */
87    if (!debug_locks) {
88        __raw_spin_unlock(&lockdep_lock);
89        return 0;
90    }
91    /* prevent any recursions within lockdep from causing deadlocks */
92    current->lockdep_recursion++;
93    return 1;
94}
95
96static inline int graph_unlock(void)
97{
98    if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
99        return DEBUG_LOCKS_WARN_ON(1);
100
101    current->lockdep_recursion--;
102    __raw_spin_unlock(&lockdep_lock);
103    return 0;
104}
105
106/*
107 * Turn lock debugging off and return with 0 if it was off already,
108 * and also release the graph lock:
109 */
110static inline int debug_locks_off_graph_unlock(void)
111{
112    int ret = debug_locks_off();
113
114    __raw_spin_unlock(&lockdep_lock);
115
116    return ret;
117}
118
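/*
 * A minimal usage sketch (example_modify_graph() is hypothetical; the real
 * callers are register_lock_class(), add_lock_to_list(), lookup_chain_cache()
 * and friends): every modification of the dependency graph is bracketed by
 * graph_lock()/graph_unlock(), and error paths use
 * debug_locks_off_graph_unlock() so that a report is printed only once.
 */
#if 0
static int example_modify_graph(void)
{
    if (!graph_lock())      /* the validator already turned itself off */
        return 0;
    /* ... modify lock_classes[], list_entries[] or the hash tables ... */
    graph_unlock();
    return 1;
}
#endif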
119static int lockdep_initialized;
120
121unsigned long nr_list_entries;
122static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
123
124/*
125 * All data structures here are protected by the global debug_lock.
126 *
127 * Mutex key structs only get allocated once, during bootup, and never
128 * get freed - this significantly simplifies the debugging code.
129 */
130unsigned long nr_lock_classes;
131static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
132
133static inline struct lock_class *hlock_class(struct held_lock *hlock)
134{
135    if (!hlock->class_idx) {
136        DEBUG_LOCKS_WARN_ON(1);
137        return NULL;
138    }
139    return lock_classes + hlock->class_idx - 1;
140}
141
142#ifdef CONFIG_LOCK_STAT
143static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
144
145static inline u64 lockstat_clock(void)
146{
147    return cpu_clock(smp_processor_id());
148}
149
150static int lock_point(unsigned long points[], unsigned long ip)
151{
152    int i;
153
154    for (i = 0; i < LOCKSTAT_POINTS; i++) {
155        if (points[i] == 0) {
156            points[i] = ip;
157            break;
158        }
159        if (points[i] == ip)
160            break;
161    }
162
163    return i;
164}
165
166static void lock_time_inc(struct lock_time *lt, u64 time)
167{
168    if (time > lt->max)
169        lt->max = time;
170
171    if (time < lt->min || !lt->min)
172        lt->min = time;
173
174    lt->total += time;
175    lt->nr++;
176}
177
178static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
179{
180    dst->min += src->min;
181    dst->max += src->max;
182    dst->total += src->total;
183    dst->nr += src->nr;
184}
185
186struct lock_class_stats lock_stats(struct lock_class *class)
187{
188    struct lock_class_stats stats;
189    int cpu, i;
190
191    memset(&stats, 0, sizeof(struct lock_class_stats));
192    for_each_possible_cpu(cpu) {
193        struct lock_class_stats *pcs =
194            &per_cpu(lock_stats, cpu)[class - lock_classes];
195
196        for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
197            stats.contention_point[i] += pcs->contention_point[i];
198
199        for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
200            stats.contending_point[i] += pcs->contending_point[i];
201
202        lock_time_add(&pcs->read_waittime, &stats.read_waittime);
203        lock_time_add(&pcs->write_waittime, &stats.write_waittime);
204
205        lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
206        lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
207
208        for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
209            stats.bounces[i] += pcs->bounces[i];
210    }
211
212    return stats;
213}
214
215void clear_lock_stats(struct lock_class *class)
216{
217    int cpu;
218
219    for_each_possible_cpu(cpu) {
220        struct lock_class_stats *cpu_stats =
221            &per_cpu(lock_stats, cpu)[class - lock_classes];
222
223        memset(cpu_stats, 0, sizeof(struct lock_class_stats));
224    }
225    memset(class->contention_point, 0, sizeof(class->contention_point));
226    memset(class->contending_point, 0, sizeof(class->contending_point));
227}
228
229static struct lock_class_stats *get_lock_stats(struct lock_class *class)
230{
231    return &get_cpu_var(lock_stats)[class - lock_classes];
232}
233
234static void put_lock_stats(struct lock_class_stats *stats)
235{
236    put_cpu_var(lock_stats);
237}
238
239static void lock_release_holdtime(struct held_lock *hlock)
240{
241    struct lock_class_stats *stats;
242    u64 holdtime;
243
244    if (!lock_stat)
245        return;
246
247    holdtime = lockstat_clock() - hlock->holdtime_stamp;
248
249    stats = get_lock_stats(hlock_class(hlock));
250    if (hlock->read)
251        lock_time_inc(&stats->read_holdtime, holdtime);
252    else
253        lock_time_inc(&stats->write_holdtime, holdtime);
254    put_lock_stats(stats);
255}
256#else
257static inline void lock_release_holdtime(struct held_lock *hlock)
258{
259}
260#endif
261
262/*
263 * We keep a global list of all lock classes. The list only grows,
264 * never shrinks. The list is only accessed with the lockdep
265 * spinlock held.
266 */
267LIST_HEAD(all_lock_classes);
268
269/*
270 * The lockdep classes are in a hash-table as well, for fast lookup:
271 */
272#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
273#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
274#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
275#define classhashentry(key) (classhash_table + __classhashfn((key)))
276
277static struct list_head classhash_table[CLASSHASH_SIZE];
278
279/*
280 * We put the lock dependency chains into a hash-table as well, to cache
281 * their existence:
282 */
283#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
284#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
285#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
286#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
287
288static struct list_head chainhash_table[CHAINHASH_SIZE];
289
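/*
 * A minimal usage sketch (assuming a subclass key 'key' and a 64-bit
 * 'chain_key' as used further down in this file): mapping a key to its
 * hash bucket.
 */
#if 0
struct list_head *class_bucket = classhashentry(key);
struct list_head *chain_bucket = chainhashentry(chain_key);
#endif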
290/*
291 * The hash key of the lock dependency chains is a hash itself too:
292 * it's a hash of all locks taken up to that lock, including that lock.
293 * It's a 64-bit hash, because it's important for the keys to be
294 * unique.
295 */
296#define iterate_chain_key(key1, key2) \
297    (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
298    ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
299    (key2))
300
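/*
 * A minimal sketch of how the chain key is built up, assuming a task
 * 'curr' with its held_locks stack (compare check_chain_key() at the end
 * of this section); class_id_of() is a hypothetical stand-in for the
 * class index that gets folded in for each held lock:
 */
#if 0
u64 chain_key = 0;
int i;

for (i = 0; i < curr->lockdep_depth; i++)
    chain_key = iterate_chain_key(chain_key, class_id_of(curr->held_locks + i));
#endif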
301void lockdep_off(void)
302{
303    current->lockdep_recursion++;
304}
305EXPORT_SYMBOL(lockdep_off);
306
307void lockdep_on(void)
308{
309    current->lockdep_recursion--;
310}
311EXPORT_SYMBOL(lockdep_on);
312
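/*
 * A minimal usage sketch: code that must not be tracked (for instance
 * because it could recurse back into lockdep) brackets itself with these
 * two calls. The lockdep_recursion counter nests, so such pairs may be
 * stacked.
 */
#if 0
lockdep_off();
/* ... locking here is invisible to the validator ... */
lockdep_on();
#endif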
313/*
314 * Debugging switches:
315 */
316
317#define VERBOSE 0
318#define VERY_VERBOSE 0
319
320#if VERBOSE
321# define HARDIRQ_VERBOSE 1
322# define SOFTIRQ_VERBOSE 1
323# define RECLAIM_VERBOSE 1
324#else
325# define HARDIRQ_VERBOSE 0
326# define SOFTIRQ_VERBOSE 0
327# define RECLAIM_VERBOSE 0
328#endif
329
330#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
331/*
332 * Quick filtering for interesting events:
333 */
334static int class_filter(struct lock_class *class)
335{
336#if 0
337    /* Example */
338    if (class->name_version == 1 &&
339            !strcmp(class->name, "lockname"))
340        return 1;
341    if (class->name_version == 1 &&
342            !strcmp(class->name, "&struct->lockfield"))
343        return 1;
344#endif
345    /* Filter everything else. Returning 1 here would allow everything else. */
346    return 0;
347}
348#endif
349
350static int verbose(struct lock_class *class)
351{
352#if VERBOSE
353    return class_filter(class);
354#endif
355    return 0;
356}
357
358/*
359 * Stack-trace: tightly packed array of stack backtrace
360 * addresses. Protected by the graph_lock.
361 */
362unsigned long nr_stack_trace_entries;
363static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
364
365static int save_trace(struct stack_trace *trace)
366{
367    trace->nr_entries = 0;
368    trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
369    trace->entries = stack_trace + nr_stack_trace_entries;
370
371    trace->skip = 3;
372
373    save_stack_trace(trace);
374
375    /*
376     * Some daft arches put -1 at the end to indicate it's a full trace.
377     *
378     * <rant> this is buggy anyway, since it takes a whole extra entry so a
379     * complete trace that maxes out the entries provided will be reported
380     * as incomplete, friggin useless </rant>
381     */
382    if (trace->entries[trace->nr_entries-1] == ULONG_MAX)
383        trace->nr_entries--;
384
385    trace->max_entries = trace->nr_entries;
386
387    nr_stack_trace_entries += trace->nr_entries;
388
389    if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
390        if (!debug_locks_off_graph_unlock())
391            return 0;
392
393        printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
394        printk("turning off the locking correctness validator.\n");
395        dump_stack();
396
397        return 0;
398    }
399
400    return 1;
401}
402
403unsigned int nr_hardirq_chains;
404unsigned int nr_softirq_chains;
405unsigned int nr_process_chains;
406unsigned int max_lockdep_depth;
407
408#ifdef CONFIG_DEBUG_LOCKDEP
409/*
410 * We cannot printk in early bootup code. Not even early_printk()
411 * might work. So we mark any initialization errors and printk
412 * about it later on, in lockdep_info().
413 */
414static int lockdep_init_error;
415static unsigned long lockdep_init_trace_data[20];
416static struct stack_trace lockdep_init_trace = {
417    .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
418    .entries = lockdep_init_trace_data,
419};
420
421/*
422 * Various lockdep statistics:
423 */
424atomic_t chain_lookup_hits;
425atomic_t chain_lookup_misses;
426atomic_t hardirqs_on_events;
427atomic_t hardirqs_off_events;
428atomic_t redundant_hardirqs_on;
429atomic_t redundant_hardirqs_off;
430atomic_t softirqs_on_events;
431atomic_t softirqs_off_events;
432atomic_t redundant_softirqs_on;
433atomic_t redundant_softirqs_off;
434atomic_t nr_unused_locks;
435atomic_t nr_cyclic_checks;
436atomic_t nr_find_usage_forwards_checks;
437atomic_t nr_find_usage_backwards_checks;
438#endif
439
440/*
441 * Locking printouts:
442 */
443
444#define __USAGE(__STATE) \
445    [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
446    [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
447    [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
448    [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
449
450static const char *usage_str[] =
451{
452#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
453#include "lockdep_states.h"
454#undef LOCKDEP_STATE
455    [LOCK_USED] = "INITIAL USE",
456};
457
458const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
459{
460    return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
461}
462
463static inline unsigned long lock_flag(enum lock_usage_bit bit)
464{
465    return 1UL << bit;
466}
467
468static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
469{
470    char c = '.';
471
472    if (class->usage_mask & lock_flag(bit + 2))
473        c = '+';
474    if (class->usage_mask & lock_flag(bit)) {
475        c = '-';
476        if (class->usage_mask & lock_flag(bit + 2))
477            c = '?';
478    }
479
480    return c;
481}
482
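/*
 * Reading the characters produced above (one pair per state, see
 * get_usage_chars() below):
 *
 *   '.'  neither bit is set for this state
 *   '-'  LOCK_USED_IN_<STATE>: acquired in that context (e.g. hardirq-safe)
 *   '+'  LOCK_ENABLED_<STATE>: that context was enabled while the lock was
 *        held (e.g. hardirq-unsafe)
 *   '?'  both: acquired in the context and also held with it enabled
 */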
483void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
484{
485    int i = 0;
486
487#define LOCKDEP_STATE(__STATE) \
488    usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
489    usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
490#include "lockdep_states.h"
491#undef LOCKDEP_STATE
492
493    usage[i] = '\0';
494}
495
496static void print_lock_name(struct lock_class *class)
497{
498    char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
499    const char *name;
500
501    get_usage_chars(class, usage);
502
503    name = class->name;
504    if (!name) {
505        name = __get_key_name(class->key, str);
506        printk(" (%s", name);
507    } else {
508        printk(" (%s", name);
509        if (class->name_version > 1)
510            printk("#%d", class->name_version);
511        if (class->subclass)
512            printk("/%d", class->subclass);
513    }
514    printk("){%s}", usage);
515}
516
517static void print_lockdep_cache(struct lockdep_map *lock)
518{
519    const char *name;
520    char str[KSYM_NAME_LEN];
521
522    name = lock->name;
523    if (!name)
524        name = __get_key_name(lock->key->subkeys, str);
525
526    printk("%s", name);
527}
528
529static void print_lock(struct held_lock *hlock)
530{
531    print_lock_name(hlock_class(hlock));
532    printk(", at: ");
533    print_ip_sym(hlock->acquire_ip);
534}
535
536static void lockdep_print_held_locks(struct task_struct *curr)
537{
538    int i, depth = curr->lockdep_depth;
539
540    if (!depth) {
541        printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
542        return;
543    }
544    printk("%d lock%s held by %s/%d:\n",
545        depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
546
547    for (i = 0; i < depth; i++) {
548        printk(" #%d: ", i);
549        print_lock(curr->held_locks + i);
550    }
551}
552
553static void print_kernel_version(void)
554{
555    printk("%s %.*s\n", init_utsname()->release,
556        (int)strcspn(init_utsname()->version, " "),
557        init_utsname()->version);
558}
559
560static int very_verbose(struct lock_class *class)
561{
562#if VERY_VERBOSE
563    return class_filter(class);
564#endif
565    return 0;
566}
567
568/*
569 * Is this the address of a static object:
570 */
571static int static_obj(void *obj)
572{
573    unsigned long start = (unsigned long) &_stext,
574              end = (unsigned long) &_end,
575              addr = (unsigned long) obj;
576#ifdef CONFIG_SMP
577    int i;
578#endif
579
580    /*
581     * static variable?
582     */
583    if ((addr >= start) && (addr < end))
584        return 1;
585
586    if (arch_is_kernel_data(addr))
587        return 1;
588
589#ifdef CONFIG_SMP
590    /*
591     * percpu var?
592     */
593    for_each_possible_cpu(i) {
594        start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
595        end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
596                    + per_cpu_offset(i);
597
598        if ((addr >= start) && (addr < end))
599            return 1;
600    }
601#endif
602
603    /*
604     * module var?
605     */
606    return is_module_address(addr);
607}
608
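/*
 * Illustrative sketch ('struct foo' is hypothetical): why this matters.
 * register_lock_class() below insists that lock->key passes static_obj().
 * DEFINE_SPINLOCK() uses the lock object itself as the key, and
 * spin_lock_init() supplies a static lock_class_key declared at its call
 * site, so both are fine even when the lock itself lives on the heap.
 */
#if 0
static DEFINE_SPINLOCK(global_lock);    /* static lock object == static key */

struct foo {
    spinlock_t lock;
};

static void foo_init(struct foo *f)
{
    spin_lock_init(&f->lock);           /* key: static lock_class_key inside
                                         * the spin_lock_init() macro */
}
#endif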
609/*
610 * To make lock name printouts unique, we calculate a unique
611 * class->name_version generation counter:
612 */
613static int count_matching_names(struct lock_class *new_class)
614{
615    struct lock_class *class;
616    int count = 0;
617
618    if (!new_class->name)
619        return 0;
620
621    list_for_each_entry(class, &all_lock_classes, lock_entry) {
622        if (new_class->key - new_class->subclass == class->key)
623            return class->name_version;
624        if (class->name && !strcmp(class->name, new_class->name))
625            count = max(count, class->name_version);
626    }
627
628    return count + 1;
629}
630
631/*
632 * Look up a lock's class in the hash-table. Returns NULL if the class has
633 * not been registered yet. The result is cached in the lock object by the
634 * callers, so the actual hash lookup should happen once per lock object.
635 */
636static inline struct lock_class *
637look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
638{
639    struct lockdep_subclass_key *key;
640    struct list_head *hash_head;
641    struct lock_class *class;
642
643#ifdef CONFIG_DEBUG_LOCKDEP
644    /*
645     * If the architecture calls into lockdep before initializing
646     * the hashes then we'll warn about it later. (we cannot printk
647     * right now)
648     */
649    if (unlikely(!lockdep_initialized)) {
650        lockdep_init();
651        lockdep_init_error = 1;
652        save_stack_trace(&lockdep_init_trace);
653    }
654#endif
655
656    /*
657     * Static locks do not have their class-keys yet - for them the key
658     * is the lock object itself:
659     */
660    if (unlikely(!lock->key))
661        lock->key = (void *)lock;
662
663    /*
664     * NOTE: the class-key must be unique. For dynamic locks, a static
665     * lock_class_key variable is passed in through the mutex_init()
666     * (or spin_lock_init()) call - which acts as the key. For static
667     * locks we use the lock object itself as the key.
668     */
669    BUILD_BUG_ON(sizeof(struct lock_class_key) >
670            sizeof(struct lockdep_map));
671
672    key = lock->key->subkeys + subclass;
673
674    hash_head = classhashentry(key);
675
676    /*
677     * We can walk the hash lock-free, because the hash only
678     * grows, and we are careful when adding entries to the end:
679     */
680    list_for_each_entry(class, hash_head, hash_entry) {
681        if (class->key == key) {
682            WARN_ON_ONCE(class->name != lock->name);
683            return class;
684        }
685    }
686
687    return NULL;
688}
689
690/*
691 * Register a lock's class in the hash-table, if the class is not present
692 * yet. Otherwise we look it up. We cache the result in the lock object
693 * itself, so actual lookup of the hash should be once per lock object.
694 */
695static inline struct lock_class *
696register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
697{
698    struct lockdep_subclass_key *key;
699    struct list_head *hash_head;
700    struct lock_class *class;
701    unsigned long flags;
702
703    class = look_up_lock_class(lock, subclass);
704    if (likely(class))
705        return class;
706
707    /*
708     * Debug-check: all keys must be persistent!
709      */
710    if (!static_obj(lock->key)) {
711        debug_locks_off();
712        printk("INFO: trying to register non-static key.\n");
713        printk("the code is fine but needs lockdep annotation.\n");
714        printk("turning off the locking correctness validator.\n");
715        dump_stack();
716
717        return NULL;
718    }
719
720    key = lock->key->subkeys + subclass;
721    hash_head = classhashentry(key);
722
723    raw_local_irq_save(flags);
724    if (!graph_lock()) {
725        raw_local_irq_restore(flags);
726        return NULL;
727    }
728    /*
729     * We have to do the hash-walk again, to avoid races
730     * with another CPU:
731     */
732    list_for_each_entry(class, hash_head, hash_entry)
733        if (class->key == key)
734            goto out_unlock_set;
735    /*
736     * Allocate a new key from the static array, and add it to
737     * the hash:
738     */
739    if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
740        if (!debug_locks_off_graph_unlock()) {
741            raw_local_irq_restore(flags);
742            return NULL;
743        }
744        raw_local_irq_restore(flags);
745
746        printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
747        printk("turning off the locking correctness validator.\n");
748        dump_stack();
749        return NULL;
750    }
751    class = lock_classes + nr_lock_classes++;
752    debug_atomic_inc(&nr_unused_locks);
753    class->key = key;
754    class->name = lock->name;
755    class->subclass = subclass;
756    INIT_LIST_HEAD(&class->lock_entry);
757    INIT_LIST_HEAD(&class->locks_before);
758    INIT_LIST_HEAD(&class->locks_after);
759    class->name_version = count_matching_names(class);
760    /*
761     * We use RCU's safe list-add method to make
762     * parallel walking of the hash-list safe:
763     */
764    list_add_tail_rcu(&class->hash_entry, hash_head);
765    /*
766     * Add it to the global list of classes:
767     */
768    list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
769
770    if (verbose(class)) {
771        graph_unlock();
772        raw_local_irq_restore(flags);
773
774        printk("\nnew class %p: %s", class->key, class->name);
775        if (class->name_version > 1)
776            printk("#%d", class->name_version);
777        printk("\n");
778        dump_stack();
779
780        raw_local_irq_save(flags);
781        if (!graph_lock()) {
782            raw_local_irq_restore(flags);
783            return NULL;
784        }
785    }
786out_unlock_set:
787    graph_unlock();
788    raw_local_irq_restore(flags);
789
790    if (!subclass || force)
791        lock->class_cache = class;
792
793    if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
794        return NULL;
795
796    return class;
797}
798
799#ifdef CONFIG_PROVE_LOCKING
800/*
801 * Allocate a lockdep entry. (assumes the graph_lock is held, returns
802 * with NULL on failure)
803 */
804static struct lock_list *alloc_list_entry(void)
805{
806    if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
807        if (!debug_locks_off_graph_unlock())
808            return NULL;
809
810        printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
811        printk("turning off the locking correctness validator.\n");
812        dump_stack();
813        return NULL;
814    }
815    return list_entries + nr_list_entries++;
816}
817
818/*
819 * Add a new dependency to the head of the list:
820 */
821static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
822                struct list_head *head, unsigned long ip, int distance)
823{
824    struct lock_list *entry;
825    /*
826     * Lock not present yet - get a new dependency struct and
827     * add it to the list:
828     */
829    entry = alloc_list_entry();
830    if (!entry)
831        return 0;
832
833    if (!save_trace(&entry->trace))
834        return 0;
835
836    entry->class = this;
837    entry->distance = distance;
838    /*
839     * Since we never remove from the dependency list, the list can
840     * be walked lockless by other CPUs, it's only allocation
841     * that must be protected by the spinlock. But this also means
842     * we must make new entries visible only once writes to the
843     * entry become visible - hence the RCU op:
844     */
845    list_add_tail_rcu(&entry->entry, head);
846
847    return 1;
848}
849
850/*
851 * For efficient modulo arithmetic on the queue indices, we use a power of 2
852 */
853#define MAX_CIRCULAR_QUEUE_SIZE 4096UL
854#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
855
856/*
857 * The circular_queue and its helpers are used to implement the
858 * breadth-first search (BFS) algorithm, by which we can build
859 * the shortest path from the next lock to be acquired to a
860 * previously held lock if there is a circular dependency between them.
861 */
862struct circular_queue {
863    unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
864    unsigned int front, rear;
865};
866
867static struct circular_queue lock_cq;
868
869unsigned int max_bfs_queue_depth;
870
871static unsigned int lockdep_dependency_gen_id;
872
873static inline void __cq_init(struct circular_queue *cq)
874{
875    cq->front = cq->rear = 0;
876    lockdep_dependency_gen_id++;
877}
878
879static inline int __cq_empty(struct circular_queue *cq)
880{
881    return (cq->front == cq->rear);
882}
883
884static inline int __cq_full(struct circular_queue *cq)
885{
886    return ((cq->rear + 1) & CQ_MASK) == cq->front;
887}
888
889static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
890{
891    if (__cq_full(cq))
892        return -1;
893
894    cq->element[cq->rear] = elem;
895    cq->rear = (cq->rear + 1) & CQ_MASK;
896    return 0;
897}
898
899static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
900{
901    if (__cq_empty(cq))
902        return -1;
903
904    *elem = cq->element[cq->front];
905    cq->front = (cq->front + 1) & CQ_MASK;
906    return 0;
907}
908
909static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
910{
911    return (cq->rear - cq->front) & CQ_MASK;
912}
913
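/*
 * A minimal sketch of how __bfs() below drives this queue (elements are
 * struct lock_list pointers cast to unsigned long):
 */
#if 0
unsigned long elem;

__cq_init(&lock_cq);
__cq_enqueue(&lock_cq, (unsigned long)source_entry);
while (!__cq_empty(&lock_cq)) {
    __cq_dequeue(&lock_cq, &elem);
    /* ... visit the lock_list behind 'elem', enqueue unvisited children ... */
}
#endif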
914static inline void mark_lock_accessed(struct lock_list *lock,
915                    struct lock_list *parent)
916{
917    unsigned long nr;
918
919    nr = lock - list_entries;
920    WARN_ON(nr >= nr_list_entries);
921    lock->parent = parent;
922    lock->class->dep_gen_id = lockdep_dependency_gen_id;
923}
924
925static inline unsigned long lock_accessed(struct lock_list *lock)
926{
927    unsigned long nr;
928
929    nr = lock - list_entries;
930    WARN_ON(nr >= nr_list_entries);
931    return lock->class->dep_gen_id == lockdep_dependency_gen_id;
932}
933
934static inline struct lock_list *get_lock_parent(struct lock_list *child)
935{
936    return child->parent;
937}
938
939static inline int get_lock_depth(struct lock_list *child)
940{
941    int depth = 0;
942    struct lock_list *parent;
943
944    while ((parent = get_lock_parent(child))) {
945        child = parent;
946        depth++;
947    }
948    return depth;
949}
950
951static int __bfs(struct lock_list *source_entry,
952         void *data,
953         int (*match)(struct lock_list *entry, void *data),
954         struct lock_list **target_entry,
955         int forward)
956{
957    struct lock_list *entry;
958    struct list_head *head;
959    struct circular_queue *cq = &lock_cq;
960    int ret = 1;
961
962    if (match(source_entry, data)) {
963        *target_entry = source_entry;
964        ret = 0;
965        goto exit;
966    }
967
968    if (forward)
969        head = &source_entry->class->locks_after;
970    else
971        head = &source_entry->class->locks_before;
972
973    if (list_empty(head))
974        goto exit;
975
976    __cq_init(cq);
977    __cq_enqueue(cq, (unsigned long)source_entry);
978
979    while (!__cq_empty(cq)) {
980        struct lock_list *lock;
981
982        __cq_dequeue(cq, (unsigned long *)&lock);
983
984        if (!lock->class) {
985            ret = -2;
986            goto exit;
987        }
988
989        if (forward)
990            head = &lock->class->locks_after;
991        else
992            head = &lock->class->locks_before;
993
994        list_for_each_entry(entry, head, entry) {
995            if (!lock_accessed(entry)) {
996                unsigned int cq_depth;
997                mark_lock_accessed(entry, lock);
998                if (match(entry, data)) {
999                    *target_entry = entry;
1000                    ret = 0;
1001                    goto exit;
1002                }
1003
1004                if (__cq_enqueue(cq, (unsigned long)entry)) {
1005                    ret = -1;
1006                    goto exit;
1007                }
1008                cq_depth = __cq_get_elem_count(cq);
1009                if (max_bfs_queue_depth < cq_depth)
1010                    max_bfs_queue_depth = cq_depth;
1011            }
1012        }
1013    }
1014exit:
1015    return ret;
1016}
1017
1018static inline int __bfs_forwards(struct lock_list *src_entry,
1019            void *data,
1020            int (*match)(struct lock_list *entry, void *data),
1021            struct lock_list **target_entry)
1022{
1023    return __bfs(src_entry, data, match, target_entry, 1);
1024
1025}
1026
1027static inline int __bfs_backwards(struct lock_list *src_entry,
1028            void *data,
1029            int (*match)(struct lock_list *entry, void *data),
1030            struct lock_list **target_entry)
1031{
1032    return __bfs(src_entry, data, match, target_entry, 0);
1033
1034}
1035
1036/*
1037 * Forwards-direction lock-dependency checking (using the BFS above), used
1038 * both for noncyclic checking and for hardirq-unsafe/softirq-unsafe
1039 * checking.
1040 */
1041
1042/*
1043 * Print a dependency chain entry (this is only done when a deadlock
1044 * has been detected):
1045 */
1046static noinline int
1047print_circular_bug_entry(struct lock_list *target, int depth)
1048{
1049    if (debug_locks_silent)
1050        return 0;
1051    printk("\n-> #%u", depth);
1052    print_lock_name(target->class);
1053    printk(":\n");
1054    print_stack_trace(&target->trace, 6);
1055
1056    return 0;
1057}
1058
1059/*
1060 * When a circular dependency is detected, print the
1061 * header first:
1062 */
1063static noinline int
1064print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1065            struct held_lock *check_src,
1066            struct held_lock *check_tgt)
1067{
1068    struct task_struct *curr = current;
1069
1070    if (debug_locks_silent)
1071        return 0;
1072
1073    printk("\n=======================================================\n");
1074    printk( "[ INFO: possible circular locking dependency detected ]\n");
1075    print_kernel_version();
1076    printk( "-------------------------------------------------------\n");
1077    printk("%s/%d is trying to acquire lock:\n",
1078        curr->comm, task_pid_nr(curr));
1079    print_lock(check_src);
1080    printk("\nbut task is already holding lock:\n");
1081    print_lock(check_tgt);
1082    printk("\nwhich lock already depends on the new lock.\n\n");
1083    printk("\nthe existing dependency chain (in reverse order) is:\n");
1084
1085    print_circular_bug_entry(entry, depth);
1086
1087    return 0;
1088}
1089
1090static inline int class_equal(struct lock_list *entry, void *data)
1091{
1092    return entry->class == data;
1093}
1094
1095static noinline int print_circular_bug(struct lock_list *this,
1096                struct lock_list *target,
1097                struct held_lock *check_src,
1098                struct held_lock *check_tgt)
1099{
1100    struct task_struct *curr = current;
1101    struct lock_list *parent;
1102    int depth;
1103
1104    if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1105        return 0;
1106
1107    if (!save_trace(&this->trace))
1108        return 0;
1109
1110    depth = get_lock_depth(target);
1111
1112    print_circular_bug_header(target, depth, check_src, check_tgt);
1113
1114    parent = get_lock_parent(target);
1115
1116    while (parent) {
1117        print_circular_bug_entry(parent, --depth);
1118        parent = get_lock_parent(parent);
1119    }
1120
1121    printk("\nother info that might help us debug this:\n\n");
1122    lockdep_print_held_locks(curr);
1123
1124    printk("\nstack backtrace:\n");
1125    dump_stack();
1126
1127    return 0;
1128}
1129
1130static noinline int print_bfs_bug(int ret)
1131{
1132    if (!debug_locks_off_graph_unlock())
1133        return 0;
1134
1135    WARN(1, "lockdep bfs error:%d\n", ret);
1136
1137    return 0;
1138}
1139
1140static int noop_count(struct lock_list *entry, void *data)
1141{
1142    (*(unsigned long *)data)++;
1143    return 0;
1144}
1145
1146unsigned long __lockdep_count_forward_deps(struct lock_list *this)
1147{
1148    unsigned long count = 0;
1149    struct lock_list *uninitialized_var(target_entry);
1150
1151    __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1152
1153    return count;
1154}
1155unsigned long lockdep_count_forward_deps(struct lock_class *class)
1156{
1157    unsigned long ret, flags;
1158    struct lock_list this;
1159
1160    this.parent = NULL;
1161    this.class = class;
1162
1163    local_irq_save(flags);
1164    __raw_spin_lock(&lockdep_lock);
1165    ret = __lockdep_count_forward_deps(&this);
1166    __raw_spin_unlock(&lockdep_lock);
1167    local_irq_restore(flags);
1168
1169    return ret;
1170}
1171
1172unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1173{
1174    unsigned long count = 0;
1175    struct lock_list *uninitialized_var(target_entry);
1176
1177    __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1178
1179    return count;
1180}
1181
1182unsigned long lockdep_count_backward_deps(struct lock_class *class)
1183{
1184    unsigned long ret, flags;
1185    struct lock_list this;
1186
1187    this.parent = NULL;
1188    this.class = class;
1189
1190    local_irq_save(flags);
1191    __raw_spin_lock(&lockdep_lock);
1192    ret = __lockdep_count_backward_deps(&this);
1193    __raw_spin_unlock(&lockdep_lock);
1194    local_irq_restore(flags);
1195
1196    return ret;
1197}
1198
1199/*
1200 * Prove that the dependency graph starting at <entry> can not
1201 * lead to <target>. Print an error and return 0 if it does.
1202 */
1203static noinline int
1204check_noncircular(struct lock_list *root, struct lock_class *target,
1205        struct lock_list **target_entry)
1206{
1207    int result;
1208
1209    debug_atomic_inc(&nr_cyclic_checks);
1210
1211    result = __bfs_forwards(root, target, class_equal, target_entry);
1212
1213    return result;
1214}
1215
1216#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1217/*
1218 * Forwards and backwards subgraph searching, for the purposes of
1219 * proving that two subgraphs can be connected by a new dependency
1220 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1221 */
1222
1223static inline int usage_match(struct lock_list *entry, void *bit)
1224{
1225    return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1226}
1227
1228
1229
1230/*
1231 * Find a node in the forwards-direction dependency sub-graph starting
1232 * at @root->class that matches @bit.
1233 *
1234 * Return 0 if such a node exists in the subgraph, and put that node
1235 * into *@target_entry.
1236 *
1237 * Return 1 otherwise and keep *@target_entry unchanged.
1238 * Return <0 on error.
1239 */
1240static int
1241find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1242            struct lock_list **target_entry)
1243{
1244    int result;
1245
1246    debug_atomic_inc(&nr_find_usage_forwards_checks);
1247
1248    result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1249
1250    return result;
1251}
1252
1253/*
1254 * Find a node in the backwards-direction dependency sub-graph starting
1255 * at @root->class that matches @bit.
1256 *
1257 * Return 0 if such a node exists in the subgraph, and put that node
1258 * into *@target_entry.
1259 *
1260 * Return 1 otherwise and keep *@target_entry unchanged.
1261 * Return <0 on error.
1262 */
1263static int
1264find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1265            struct lock_list **target_entry)
1266{
1267    int result;
1268
1269    debug_atomic_inc(&nr_find_usage_backwards_checks);
1270
1271    result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
1272
1273    return result;
1274}
1275
1276static void print_lock_class_header(struct lock_class *class, int depth)
1277{
1278    int bit;
1279
1280    printk("%*s->", depth, "");
1281    print_lock_name(class);
1282    printk(" ops: %lu", class->ops);
1283    printk(" {\n");
1284
1285    for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1286        if (class->usage_mask & (1 << bit)) {
1287            int len = depth;
1288
1289            len += printk("%*s %s", depth, "", usage_str[bit]);
1290            len += printk(" at:\n");
1291            print_stack_trace(class->usage_traces + bit, len);
1292        }
1293    }
1294    printk("%*s }\n", depth, "");
1295
1296    printk("%*s ... key at: ",depth,"");
1297    print_ip_sym((unsigned long)class->key);
1298}
1299
1300/*
1301 * printk the shortest lock dependencies from @start to @end in reverse order:
1302 */
1303static void __used
1304print_shortest_lock_dependencies(struct lock_list *leaf,
1305                struct lock_list *root)
1306{
1307    struct lock_list *entry = leaf;
1308    int depth;
1309
1310    /* compute the depth from the tree generated by BFS */
1311    depth = get_lock_depth(leaf);
1312
1313    do {
1314        print_lock_class_header(entry->class, depth);
1315        printk("%*s ... acquired at:\n", depth, "");
1316        print_stack_trace(&entry->trace, 2);
1317        printk("\n");
1318
1319        if (depth == 0 && (entry != root)) {
1320            printk("lockdep:%s bad BFS generated tree\n", __func__);
1321            break;
1322        }
1323
1324        entry = get_lock_parent(entry);
1325        depth--;
1326    } while (entry && (depth >= 0));
1327
1328    return;
1329}
1330
1331static int
1332print_bad_irq_dependency(struct task_struct *curr,
1333             struct lock_list *prev_root,
1334             struct lock_list *next_root,
1335             struct lock_list *backwards_entry,
1336             struct lock_list *forwards_entry,
1337             struct held_lock *prev,
1338             struct held_lock *next,
1339             enum lock_usage_bit bit1,
1340             enum lock_usage_bit bit2,
1341             const char *irqclass)
1342{
1343    if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1344        return 0;
1345
1346    printk("\n======================================================\n");
1347    printk( "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
1348        irqclass, irqclass);
1349    print_kernel_version();
1350    printk( "------------------------------------------------------\n");
1351    printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1352        curr->comm, task_pid_nr(curr),
1353        curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1354        curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1355        curr->hardirqs_enabled,
1356        curr->softirqs_enabled);
1357    print_lock(next);
1358
1359    printk("\nand this task is already holding:\n");
1360    print_lock(prev);
1361    printk("which would create a new lock dependency:\n");
1362    print_lock_name(hlock_class(prev));
1363    printk(" ->");
1364    print_lock_name(hlock_class(next));
1365    printk("\n");
1366
1367    printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1368        irqclass);
1369    print_lock_name(backwards_entry->class);
1370    printk("\n... which became %s-irq-safe at:\n", irqclass);
1371
1372    print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
1373
1374    printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1375    print_lock_name(forwards_entry->class);
1376    printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1377    printk("...");
1378
1379    print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
1380
1381    printk("\nother info that might help us debug this:\n\n");
1382    lockdep_print_held_locks(curr);
1383
1384    printk("\nthe dependencies between %s-irq-safe lock", irqclass);
1385    printk(" and the holding lock:\n");
1386    if (!save_trace(&prev_root->trace))
1387        return 0;
1388    print_shortest_lock_dependencies(backwards_entry, prev_root);
1389
1390    printk("\nthe dependencies between the lock to be acquired");
1391    printk(" and %s-irq-unsafe lock:\n", irqclass);
1392    if (!save_trace(&next_root->trace))
1393        return 0;
1394    print_shortest_lock_dependencies(forwards_entry, next_root);
1395
1396    printk("\nstack backtrace:\n");
1397    dump_stack();
1398
1399    return 0;
1400}
1401
1402static int
1403check_usage(struct task_struct *curr, struct held_lock *prev,
1404        struct held_lock *next, enum lock_usage_bit bit_backwards,
1405        enum lock_usage_bit bit_forwards, const char *irqclass)
1406{
1407    int ret;
1408    struct lock_list this, that;
1409    struct lock_list *uninitialized_var(target_entry);
1410    struct lock_list *uninitialized_var(target_entry1);
1411
1412    this.parent = NULL;
1413
1414    this.class = hlock_class(prev);
1415    ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1416    if (ret < 0)
1417        return print_bfs_bug(ret);
1418    if (ret == 1)
1419        return ret;
1420
1421    that.parent = NULL;
1422    that.class = hlock_class(next);
1423    ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1424    if (ret < 0)
1425        return print_bfs_bug(ret);
1426    if (ret == 1)
1427        return ret;
1428
1429    return print_bad_irq_dependency(curr, &this, &that,
1430            target_entry, target_entry1,
1431            prev, next,
1432            bit_backwards, bit_forwards, irqclass);
1433}
1434
1435static const char *state_names[] = {
1436#define LOCKDEP_STATE(__STATE) \
1437    __stringify(__STATE),
1438#include "lockdep_states.h"
1439#undef LOCKDEP_STATE
1440};
1441
1442static const char *state_rnames[] = {
1443#define LOCKDEP_STATE(__STATE) \
1444    __stringify(__STATE)"-READ",
1445#include "lockdep_states.h"
1446#undef LOCKDEP_STATE
1447};
1448
1449static inline const char *state_name(enum lock_usage_bit bit)
1450{
1451    return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1452}
1453
1454static int exclusive_bit(int new_bit)
1455{
1456    /*
1457     * USED_IN
1458     * USED_IN_READ
1459     * ENABLED
1460     * ENABLED_READ
1461     *
1462     * bit 0 - write/read
1463     * bit 1 - used_in/enabled
1464     * bit 2+ state
1465     */
1466
1467    int state = new_bit & ~3;
1468    int dir = new_bit & 2;
1469
1470    /*
1471     * keep state, bit flip the direction and strip read.
1472     */
1473    return state | (dir ^ 2);
1474}
1475
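/*
 * Worked example with the encoding above (hardirq shown; the other states
 * work the same way):
 *
 *   exclusive_bit(LOCK_USED_IN_HARDIRQ)      == LOCK_ENABLED_HARDIRQ
 *   exclusive_bit(LOCK_ENABLED_HARDIRQ_READ) == LOCK_USED_IN_HARDIRQ
 *
 * i.e. the used_in/enabled direction is flipped and the READ bit dropped,
 * giving the usage that must not coexist with the new one.
 */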
1476static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1477               struct held_lock *next, enum lock_usage_bit bit)
1478{
1479    /*
1480     * Prove that the new dependency does not connect a hardirq-safe
1481     * lock with a hardirq-unsafe lock - to achieve this we search
1482     * the backwards-subgraph starting at <prev>, and the
1483     * forwards-subgraph starting at <next>:
1484     */
1485    if (!check_usage(curr, prev, next, bit,
1486               exclusive_bit(bit), state_name(bit)))
1487        return 0;
1488
1489    bit++; /* _READ */
1490
1491    /*
1492     * Prove that the new dependency does not connect a hardirq-safe-read
1493     * lock with a hardirq-unsafe lock - to achieve this we search
1494     * the backwards-subgraph starting at <prev>, and the
1495     * forwards-subgraph starting at <next>:
1496     */
1497    if (!check_usage(curr, prev, next, bit,
1498               exclusive_bit(bit), state_name(bit)))
1499        return 0;
1500
1501    return 1;
1502}
1503
1504static int
1505check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1506        struct held_lock *next)
1507{
1508#define LOCKDEP_STATE(__STATE) \
1509    if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1510        return 0;
1511#include "lockdep_states.h"
1512#undef LOCKDEP_STATE
1513
1514    return 1;
1515}
1516
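/*
 * The scenario ruled out above, with two hypothetical locks: S is
 * hardirq-safe (an interrupt handler takes it), U is hardirq-unsafe
 * (taken with interrupts enabled):
 *
 *   CPU0                          CPU1
 *   ----                          ----
 *   spin_lock(&U);                spin_lock(&S);
 *                                 spin_lock(&U);    <- waits for CPU0
 *   <hardirq>
 *     spin_lock(&S);              <- waits for CPU1: deadlock
 *
 * Establishing the dependency S -> U (as CPU1 does) is therefore rejected:
 * S shows up in the backwards subgraph of <prev>, U in the forwards
 * subgraph of <next>.
 */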
1517static void inc_chains(void)
1518{
1519    if (current->hardirq_context)
1520        nr_hardirq_chains++;
1521    else {
1522        if (current->softirq_context)
1523            nr_softirq_chains++;
1524        else
1525            nr_process_chains++;
1526    }
1527}
1528
1529#else
1530
1531static inline int
1532check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1533        struct held_lock *next)
1534{
1535    return 1;
1536}
1537
1538static inline void inc_chains(void)
1539{
1540    nr_process_chains++;
1541}
1542
1543#endif
1544
1545static int
1546print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1547           struct held_lock *next)
1548{
1549    if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1550        return 0;
1551
1552    printk("\n=============================================\n");
1553    printk( "[ INFO: possible recursive locking detected ]\n");
1554    print_kernel_version();
1555    printk( "---------------------------------------------\n");
1556    printk("%s/%d is trying to acquire lock:\n",
1557        curr->comm, task_pid_nr(curr));
1558    print_lock(next);
1559    printk("\nbut task is already holding lock:\n");
1560    print_lock(prev);
1561
1562    printk("\nother info that might help us debug this:\n");
1563    lockdep_print_held_locks(curr);
1564
1565    printk("\nstack backtrace:\n");
1566    dump_stack();
1567
1568    return 0;
1569}
1570
1571/*
1572 * Check whether we are holding such a class already.
1573 *
1574 * (Note that this has to be done separately, because the graph cannot
1575 * detect such classes of deadlocks.)
1576 *
1577 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1578 */
1579static int
1580check_deadlock(struct task_struct *curr, struct held_lock *next,
1581           struct lockdep_map *next_instance, int read)
1582{
1583    struct held_lock *prev;
1584    struct held_lock *nest = NULL;
1585    int i;
1586
1587    for (i = 0; i < curr->lockdep_depth; i++) {
1588        prev = curr->held_locks + i;
1589
1590        if (prev->instance == next->nest_lock)
1591            nest = prev;
1592
1593        if (hlock_class(prev) != hlock_class(next))
1594            continue;
1595
1596        /*
1597         * Allow read-after-read recursion of the same
1598         * lock class (i.e. read_lock(lock)+read_lock(lock)):
1599         */
1600        if ((read == 2) && prev->read)
1601            return 2;
1602
1603        /*
1604         * We're holding the nest_lock, which serializes this lock's
1605         * nesting behaviour.
1606         */
1607        if (nest)
1608            return 2;
1609
1610        return print_deadlock_bug(curr, prev, next);
1611    }
1612    return 1;
1613}
1614
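/*
 * Two illustrative outcomes (rwlock_A and lock_B are hypothetical):
 */
#if 0
read_lock(&rwlock_A);
read_lock(&rwlock_A);   /* allowed: read-after-read recursion, returns 2 */

spin_lock(&lock_B);
spin_lock(&lock_B);     /* reported via print_deadlock_bug(), returns 0 */
#endif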
1615/*
1616 * There was a chain-cache miss, and we are about to add a new dependency
1617 * to a previous lock. We recursively validate the following rules:
1618 *
1619 * - would the adding of the <prev> -> <next> dependency create a
1620 * circular dependency in the graph? [== circular deadlock]
1621 *
1622 * - does the new prev->next dependency connect any hardirq-safe lock
1623 * (in the full backwards-subgraph starting at <prev>) with any
1624 * hardirq-unsafe lock (in the full forwards-subgraph starting at
1625 * <next>)? [== illegal lock inversion with hardirq contexts]
1626 *
1627 * - does the new prev->next dependency connect any softirq-safe lock
1628 * (in the full backwards-subgraph starting at <prev>) with any
1629 * softirq-unsafe lock (in the full forwards-subgraph starting at
1630 * <next>)? [== illegal lock inversion with softirq contexts]
1631 *
1632 * any of these scenarios could lead to a deadlock.
1633 *
1634 * Then if all the validations pass, we add the forwards and backwards
1635 * dependency.
1636 */
1637static int
1638check_prev_add(struct task_struct *curr, struct held_lock *prev,
1639           struct held_lock *next, int distance)
1640{
1641    struct lock_list *entry;
1642    int ret;
1643    struct lock_list this;
1644    struct lock_list *uninitialized_var(target_entry);
1645
1646    /*
1647     * Prove that the new <prev> -> <next> dependency would not
1648     * create a circular dependency in the graph. (We do this by
1649     * forward-recursing into the graph starting at <next>, and
1650     * checking whether we can reach <prev>.)
1651     *
1652     * We are using global variables to control the recursion, to
1653     * keep the stackframe size of the recursive functions low:
1654     */
1655    this.class = hlock_class(next);
1656    this.parent = NULL;
1657    ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1658    if (unlikely(!ret))
1659        return print_circular_bug(&this, target_entry, next, prev);
1660    else if (unlikely(ret < 0))
1661        return print_bfs_bug(ret);
1662
1663    if (!check_prev_add_irq(curr, prev, next))
1664        return 0;
1665
1666    /*
1667     * For recursive read-locks we do all the dependency checks,
1668     * but we don't store read-triggered dependencies (only
1669     * write-triggered dependencies). This ensures that only the
1670     * write-side dependencies matter, and that if for example a
1671     * write-lock never takes any other locks, then the reads are
1672     * equivalent to a NOP.
1673     */
1674    if (next->read == 2 || prev->read == 2)
1675        return 1;
1676    /*
1677     * Is the <prev> -> <next> dependency already present?
1678     *
1679     * (this may occur even though this is a new chain: consider
1680     * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1681     * chains - the second one will be new, but L1 already has
1682     * L2 added to its dependency list, due to the first chain.)
1683     */
1684    list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1685        if (entry->class == hlock_class(next)) {
1686            if (distance == 1)
1687                entry->distance = 1;
1688            return 2;
1689        }
1690    }
1691
1692    /*
1693     * Ok, all validations passed, add the new lock
1694     * to the previous lock's dependency list:
1695     */
1696    ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
1697                   &hlock_class(prev)->locks_after,
1698                   next->acquire_ip, distance);
1699
1700    if (!ret)
1701        return 0;
1702
1703    ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1704                   &hlock_class(next)->locks_before,
1705                   next->acquire_ip, distance);
1706    if (!ret)
1707        return 0;
1708
1709    /*
1710     * Debugging printouts:
1711     */
1712    if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1713        graph_unlock();
1714        printk("\n new dependency: ");
1715        print_lock_name(hlock_class(prev));
1716        printk(" => ");
1717        print_lock_name(hlock_class(next));
1718        printk("\n");
1719        dump_stack();
1720        return graph_lock();
1721    }
1722    return 1;
1723}
1724
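/*
 * The classic case the check above catches, with hypothetical locks A and B:
 *
 *   Task 1:                      Task 2:
 *   mutex_lock(&A);              mutex_lock(&B);
 *   mutex_lock(&B);   A -> B     mutex_lock(&A);   would add B -> A
 *
 * When task 2 acquires A, check_noncircular() starts at A (the <next> lock)
 * and finds B (the <prev> lock) in its forwards subgraph, so the new B -> A
 * dependency would close a cycle and the bug is reported even if no deadlock
 * actually happened this time.
 */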
1725/*
1726 * Add the dependency to all directly-previous locks that are 'relevant'.
1727 * The ones that are relevant are (in increasing distance from curr):
1728 * all consecutive trylock entries and the final non-trylock entry - or
1729 * the end of this context's lock-chain - whichever comes first.
1730 */
1731static int
1732check_prevs_add(struct task_struct *curr, struct held_lock *next)
1733{
1734    int depth = curr->lockdep_depth;
1735    struct held_lock *hlock;
1736
1737    /*
1738     * Debugging checks.
1739     *
1740     * Depth must not be zero for a non-head lock:
1741     */
1742    if (!depth)
1743        goto out_bug;
1744    /*
1745     * At least two relevant locks must exist for this
1746     * to be a head:
1747     */
1748    if (curr->held_locks[depth].irq_context !=
1749            curr->held_locks[depth-1].irq_context)
1750        goto out_bug;
1751
1752    for (;;) {
1753        int distance = curr->lockdep_depth - depth + 1;
1754        hlock = curr->held_locks + depth-1;
1755        /*
1756         * Only non-recursive-read entries get new dependencies
1757         * added:
1758         */
1759        if (hlock->read != 2) {
1760            if (!check_prev_add(curr, hlock, next, distance))
1761                return 0;
1762            /*
1763             * Stop after the first non-trylock entry,
1764             * as non-trylock entries have added their
1765             * own direct dependencies already, so this
1766             * lock is connected to them indirectly:
1767             */
1768            if (!hlock->trylock)
1769                break;
1770        }
1771        depth--;
1772        /*
1773         * End of lock-stack?
1774         */
1775        if (!depth)
1776            break;
1777        /*
1778         * Stop the search if we cross into another context:
1779         */
1780        if (curr->held_locks[depth].irq_context !=
1781                curr->held_locks[depth-1].irq_context)
1782            break;
1783    }
1784    return 1;
1785out_bug:
1786    if (!debug_locks_off_graph_unlock())
1787        return 0;
1788
1789    WARN_ON(1);
1790
1791    return 0;
1792}
1793
1794unsigned long nr_lock_chains;
1795struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1796int nr_chain_hlocks;
1797static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1798
1799struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1800{
1801    return lock_classes + chain_hlocks[chain->base + i];
1802}
1803
1804/*
1805 * Look up a dependency chain. If the key is not present yet then
1806 * add it and return 1 - in this case the new dependency chain is
1807 * validated. If the key is already hashed, return 0.
1808 * (On return with 1 graph_lock is held.)
1809 */
1810static inline int lookup_chain_cache(struct task_struct *curr,
1811                     struct held_lock *hlock,
1812                     u64 chain_key)
1813{
1814    struct lock_class *class = hlock_class(hlock);
1815    struct list_head *hash_head = chainhashentry(chain_key);
1816    struct lock_chain *chain;
1817    struct held_lock *hlock_curr, *hlock_next;
1818    int i, j, n, cn;
1819
1820    if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1821        return 0;
1822    /*
1823     * We can walk it lock-free, because entries only get added
1824     * to the hash:
1825     */
1826    list_for_each_entry(chain, hash_head, entry) {
1827        if (chain->chain_key == chain_key) {
1828cache_hit:
1829            debug_atomic_inc(&chain_lookup_hits);
1830            if (very_verbose(class))
1831                printk("\nhash chain already cached, key: "
1832                    "%016Lx tail class: [%p] %s\n",
1833                    (unsigned long long)chain_key,
1834                    class->key, class->name);
1835            return 0;
1836        }
1837    }
1838    if (very_verbose(class))
1839        printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
1840            (unsigned long long)chain_key, class->key, class->name);
1841    /*
1842     * Allocate a new chain entry from the static array, and add
1843     * it to the hash:
1844     */
1845    if (!graph_lock())
1846        return 0;
1847    /*
1848     * We have to walk the chain again locked - to avoid duplicates:
1849     */
1850    list_for_each_entry(chain, hash_head, entry) {
1851        if (chain->chain_key == chain_key) {
1852            graph_unlock();
1853            goto cache_hit;
1854        }
1855    }
1856    if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1857        if (!debug_locks_off_graph_unlock())
1858            return 0;
1859
1860        printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1861        printk("turning off the locking correctness validator.\n");
1862        dump_stack();
1863        return 0;
1864    }
1865    chain = lock_chains + nr_lock_chains++;
1866    chain->chain_key = chain_key;
1867    chain->irq_context = hlock->irq_context;
1868    /* Find the first held_lock of current chain */
1869    hlock_next = hlock;
1870    for (i = curr->lockdep_depth - 1; i >= 0; i--) {
1871        hlock_curr = curr->held_locks + i;
1872        if (hlock_curr->irq_context != hlock_next->irq_context)
1873            break;
1874        hlock_next = hlock;
1875    }
1876    i++;
1877    chain->depth = curr->lockdep_depth + 1 - i;
1878    cn = nr_chain_hlocks;
1879    while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
1880        n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
1881        if (n == cn)
1882            break;
1883        cn = n;
1884    }
1885    if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
1886        chain->base = cn;
1887        for (j = 0; j < chain->depth - 1; j++, i++) {
1888            int lock_id = curr->held_locks[i].class_idx - 1;
1889            chain_hlocks[chain->base + j] = lock_id;
1890        }
1891        chain_hlocks[chain->base + j] = class - lock_classes;
1892    }
1893    list_add_tail_rcu(&chain->entry, hash_head);
1894    debug_atomic_inc(&chain_lookup_misses);
1895    inc_chains();
1896
1897    return 1;
1898}
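
/*
 * An illustrative sketch of the chain_hlocks[] layout built above; the
 * class indices are made-up example values:
 *
 *   held-lock stack (oldest -> newest):  A (class 7), B (class 3), C (class 12)
 *   where C is the lock currently being acquired.
 *
 *   chain->base  = cn   (slot range reserved via the cmpxchg loop)
 *   chain->depth = 3
 *
 *   chain_hlocks[base + 0] = 7    <- held_locks[i].class_idx - 1 (lock A)
 *   chain_hlocks[base + 1] = 3    <- lock B
 *   chain_hlocks[base + 2] = 12   <- class - lock_classes (lock C)
 *
 * lock_chain_get_class(chain, i) then recovers the class as
 * lock_classes + chain_hlocks[chain->base + i].
 */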
1899
1900static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
1901        struct held_lock *hlock, int chain_head, u64 chain_key)
1902{
1903    /*
1904     * Trylock needs to maintain the stack of held locks, but it
1905     * does not add new dependencies, because trylock can be done
1906     * in any order.
1907     *
1908     * We look up the chain_key and do the O(N^2) check and update of
1909     * the dependencies only if this is a new dependency chain.
1910     * (If lookup_chain_cache() returns with 1 it acquires
1911     * graph_lock for us)
1912     */
1913    if (!hlock->trylock && (hlock->check == 2) &&
1914        lookup_chain_cache(curr, hlock, chain_key)) {
1915        /*
1916         * Check whether last held lock:
1917         *
1918         * - is irq-safe, if this lock is irq-unsafe
1919         * - is softirq-safe, if this lock is hardirq-unsafe
1920         *
1921         * And check whether the new lock's dependency graph
1922         * could lead back to the previous lock.
1923         *
1924         * Any of these scenarios could lead to a deadlock. If
1925         * all of these validations pass, we go on to add the new dependency.
1926         */
1927        int ret = check_deadlock(curr, hlock, lock, hlock->read);
1928
1929        if (!ret)
1930            return 0;
1931        /*
1932         * Mark recursive read, as we jump over it when
1933         * building dependencies (just like we jump over
1934         * trylock entries):
1935         */
1936        if (ret == 2)
1937            hlock->read = 2;
1938        /*
1939         * Add dependency only if this lock is not the head
1940         * of the chain, and if it's not a secondary read-lock:
1941         */
1942        if (!chain_head && ret != 2)
1943            if (!check_prevs_add(curr, hlock))
1944                return 0;
1945        graph_unlock();
1946    } else
1947        /* after lookup_chain_cache(): */
1948        if (unlikely(!debug_locks))
1949            return 0;
1950
1951    return 1;
1952}
1953#else
1954static inline int validate_chain(struct task_struct *curr,
1955               struct lockdep_map *lock, struct held_lock *hlock,
1956        int chain_head, u64 chain_key)
1957{
1958    return 1;
1959}
1960#endif
1961
1962/*
1963 * We are building curr_chain_key incrementally, so double-check
1964 * it from scratch, to make sure that it's done correctly:
1965 */
1966static void check_chain_key(struct task_struct *curr)
1967{
1968#ifdef CONFIG_DEBUG_LOCKDEP
1969    struct held_lock *hlock, *prev_hlock = NULL;
1970    unsigned int i, id;
1971    u64 chain_key = 0;
1972
1973    for (i = 0; i < curr->lockdep_depth; i++) {
1974        hlock = curr->held_locks + i;
1975        if (chain_key != hlock->prev_chain_key) {
1976            debug_locks_off();
1977            WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
1978                curr->lockdep_depth, i,
1979                (unsigned long long)chain_key,
1980                (unsigned long long)hlock->prev_chain_key);
1981            return;
1982        }
1983        id = hlock->class_idx - 1;
1984        if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
1985            return;
1986
1987        if (prev_hlock && (prev_hlock->irq_context !=
1988                            hlock->irq_context))
1989            chain_key = 0;
1990        chain_key = iterate_chain_key(chain_key, id);
1991        prev_hlock = hlock;
1992    }
1993    if (chain_key != curr->curr_chain_key) {
1994        debug_locks_off();
1995        WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
1996            curr->lockdep_depth, i,
1997            (unsigned long long)chain_key,
1998            (unsigned long long)curr->curr_chain_key);
1999    }
2000#endif
2001}
2002
2003static int
2004print_usage_bug(struct task_struct *curr, struct held_lock *this,
2005        enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2006{
2007    if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2008        return 0;
2009
2010    printk("\n=================================\n");
2011    printk( "[ INFO: inconsistent lock state ]\n");
2012    print_kernel_version();
2013    printk( "---------------------------------\n");
2014
2015    printk("inconsistent {%s} -> {%s} usage.\n",
2016        usage_str[prev_bit], usage_str[new_bit]);
2017
2018    printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2019        curr->comm, task_pid_nr(curr),
2020        trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2021        trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2022        trace_hardirqs_enabled(curr),
2023        trace_softirqs_enabled(curr));
2024    print_lock(this);
2025
2026    printk("{%s} state was registered at:\n", usage_str[prev_bit]);
2027    print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2028
2029    print_irqtrace_events(curr);
2030    printk("\nother info that might help us debug this:\n");
2031    lockdep_print_held_locks(curr);
2032
2033    printk("\nstack backtrace:\n");
2034    dump_stack();
2035
2036    return 0;
2037}
2038
2039/*
2040 * Print out an error if an invalid bit is set:
2041 */
2042static inline int
2043valid_state(struct task_struct *curr, struct held_lock *this,
2044        enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2045{
2046    if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2047        return print_usage_bug(curr, this, bad_bit, new_bit);
2048    return 1;
2049}
2050
2051static int mark_lock(struct task_struct *curr, struct held_lock *this,
2052             enum lock_usage_bit new_bit);
2053
2054#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2055
2056/*
2057 * print irq inversion bug:
2058 */
2059static int
2060print_irq_inversion_bug(struct task_struct *curr,
2061            struct lock_list *root, struct lock_list *other,
2062            struct held_lock *this, int forwards,
2063            const char *irqclass)
2064{
2065    if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2066        return 0;
2067
2068    printk("\n=========================================================\n");
2069    printk( "[ INFO: possible irq lock inversion dependency detected ]\n");
2070    print_kernel_version();
2071    printk( "---------------------------------------------------------\n");
2072    printk("%s/%d just changed the state of lock:\n",
2073        curr->comm, task_pid_nr(curr));
2074    print_lock(this);
2075    if (forwards)
2076        printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2077    else
2078        printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2079    print_lock_name(other->class);
2080    printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2081
2082    printk("\nother info that might help us debug this:\n");
2083    lockdep_print_held_locks(curr);
2084
2085    printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2086    if (!save_trace(&root->trace))
2087        return 0;
2088    print_shortest_lock_dependencies(other, root);
2089
2090    printk("\nstack backtrace:\n");
2091    dump_stack();
2092
2093    return 0;
2094}
2095
2096/*
2097 * Prove that in the forwards-direction subgraph starting at <this>
2098 * there is no lock matching <mask>:
2099 */
2100static int
2101check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2102             enum lock_usage_bit bit, const char *irqclass)
2103{
2104    int ret;
2105    struct lock_list root;
2106    struct lock_list *uninitialized_var(target_entry);
2107
2108    root.parent = NULL;
2109    root.class = hlock_class(this);
2110    ret = find_usage_forwards(&root, bit, &target_entry);
2111    if (ret < 0)
2112        return print_bfs_bug(ret);
2113    if (ret == 1)
2114        return ret;
2115
2116    return print_irq_inversion_bug(curr, &root, target_entry,
2117                    this, 1, irqclass);
2118}
2119
2120/*
2121 * Prove that in the backwards-direction subgraph starting at <this>
2122 * there is no lock matching <mask>:
2123 */
2124static int
2125check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2126              enum lock_usage_bit bit, const char *irqclass)
2127{
2128    int ret;
2129    struct lock_list root;
2130    struct lock_list *uninitialized_var(target_entry);
2131
2132    root.parent = NULL;
2133    root.class = hlock_class(this);
2134    ret = find_usage_backwards(&root, bit, &target_entry);
2135    if (ret < 0)
2136        return print_bfs_bug(ret);
2137    if (ret == 1)
2138        return ret;
2139
2140    return print_irq_inversion_bug(curr, &root, target_entry,
2141                    this, 0, irqclass);
2142}
2143
2144void print_irqtrace_events(struct task_struct *curr)
2145{
2146    printk("irq event stamp: %u\n", curr->irq_events);
2147    printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
2148    print_ip_sym(curr->hardirq_enable_ip);
2149    printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
2150    print_ip_sym(curr->hardirq_disable_ip);
2151    printk("softirqs last enabled at (%u): ", curr->softirq_enable_event);
2152    print_ip_sym(curr->softirq_enable_ip);
2153    printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
2154    print_ip_sym(curr->softirq_disable_ip);
2155}
2156
2157static int HARDIRQ_verbose(struct lock_class *class)
2158{
2159#if HARDIRQ_VERBOSE
2160    return class_filter(class);
2161#endif
2162    return 0;
2163}
2164
2165static int SOFTIRQ_verbose(struct lock_class *class)
2166{
2167#if SOFTIRQ_VERBOSE
2168    return class_filter(class);
2169#endif
2170    return 0;
2171}
2172
2173static int RECLAIM_FS_verbose(struct lock_class *class)
2174{
2175#if RECLAIM_VERBOSE
2176    return class_filter(class);
2177#endif
2178    return 0;
2179}
2180
2181#define STRICT_READ_CHECKS 1
2182
2183static int (*state_verbose_f[])(struct lock_class *class) = {
2184#define LOCKDEP_STATE(__STATE) \
2185    __STATE##_verbose,
2186#include "lockdep_states.h"
2187#undef LOCKDEP_STATE
2188};
2189
2190static inline int state_verbose(enum lock_usage_bit bit,
2191                struct lock_class *class)
2192{
2193    return state_verbose_f[bit >> 2](class);
2194}
2195
2196typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2197                 enum lock_usage_bit bit, const char *name);
2198
2199static int
2200mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2201        enum lock_usage_bit new_bit)
2202{
2203    int excl_bit = exclusive_bit(new_bit);
2204    int read = new_bit & 1;
2205    int dir = new_bit & 2;
2206
2207    /*
2208     * mark USED_IN has to look forwards -- to ensure no dependency
2209     * has ENABLED state, which would allow recursion deadlocks.
2210     *
2211     * mark ENABLED has to look backwards -- to ensure no dependee
2212     * has USED_IN state, which, again, would allow recursion deadlocks.
2213     */
2214    check_usage_f usage = dir ?
2215        check_usage_backwards : check_usage_forwards;
2216
2217    /*
2218     * Validate that this particular lock does not have conflicting
2219     * usage states.
2220     */
2221    if (!valid_state(curr, this, new_bit, excl_bit))
2222        return 0;
2223
2224    /*
2225     * Validate that the lock dependencies don't have conflicting usage
2226     * states.
2227     */
2228    if ((!read || !dir || STRICT_READ_CHECKS) &&
2229            !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2230        return 0;
2231
2232    /*
2233     * Check for read in write conflicts
2234     */
2235    if (!read) {
2236        if (!valid_state(curr, this, new_bit, excl_bit + 1))
2237            return 0;
2238
2239        if (STRICT_READ_CHECKS &&
2240            !usage(curr, this, excl_bit + 1,
2241                state_name(new_bit + 1)))
2242            return 0;
2243    }
2244
2245    if (state_verbose(new_bit, hlock_class(this)))
2246        return 2;
2247
2248    return 1;
2249}
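
/*
 * A worked example of the usage-bit layout that mark_lock_irq() decodes
 * (bit 0 = read, bit 1 = direction, bits 2+ = state index):
 *
 *   new_bit  = LOCK_ENABLED_HARDIRQ_READ
 *   read     = new_bit & 1  -> 1   (a read-acquisition)
 *   dir      = new_bit & 2  -> 2   (ENABLED, so usage() is check_usage_backwards)
 *   excl_bit = exclusive_bit(new_bit) -> LOCK_USED_IN_HARDIRQ
 *              (same state, opposite direction, write variant; excl_bit + 1
 *               is the corresponding _READ bit)
 *
 * valid_state() then rejects the transition if the class already carries
 * the conflicting LOCK_USED_IN_HARDIRQ bit.
 */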
2250
2251enum mark_type {
2252#define LOCKDEP_STATE(__STATE) __STATE,
2253#include "lockdep_states.h"
2254#undef LOCKDEP_STATE
2255};
2256
2257/*
2258 * Mark all held locks with a usage bit:
2259 */
2260static int
2261mark_held_locks(struct task_struct *curr, enum mark_type mark)
2262{
2263    enum lock_usage_bit usage_bit;
2264    struct held_lock *hlock;
2265    int i;
2266
2267    for (i = 0; i < curr->lockdep_depth; i++) {
2268        hlock = curr->held_locks + i;
2269
2270        usage_bit = 2 + (mark << 2); /* ENABLED */
2271        if (hlock->read)
2272            usage_bit += 1; /* READ */
2273
2274        BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2275
2276        if (!mark_lock(curr, hlock, usage_bit))
2277            return 0;
2278    }
2279
2280    return 1;
2281}
2282
2283/*
2284 * Debugging helper: via this flag we know that we are in
2285 * 'early bootup code', and will warn about any invalid irqs-on event:
2286 */
2287static int early_boot_irqs_enabled;
2288
2289void early_boot_irqs_off(void)
2290{
2291    early_boot_irqs_enabled = 0;
2292}
2293
2294void early_boot_irqs_on(void)
2295{
2296    early_boot_irqs_enabled = 1;
2297}
2298
2299/*
2300 * Hardirqs will be enabled:
2301 */
2302void trace_hardirqs_on_caller(unsigned long ip)
2303{
2304    struct task_struct *curr = current;
2305
2306    time_hardirqs_on(CALLER_ADDR0, ip);
2307
2308    if (unlikely(!debug_locks || current->lockdep_recursion))
2309        return;
2310
2311    if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
2312        return;
2313
2314    if (unlikely(curr->hardirqs_enabled)) {
2315        debug_atomic_inc(&redundant_hardirqs_on);
2316        return;
2317    }
2318    /* we'll do an OFF -> ON transition: */
2319    curr->hardirqs_enabled = 1;
2320
2321    if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2322        return;
2323    if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2324        return;
2325    /*
2326     * We are going to turn hardirqs on, so set the
2327     * usage bit for all held locks:
2328     */
2329    if (!mark_held_locks(curr, HARDIRQ))
2330        return;
2331    /*
2332     * If we have softirqs enabled, then set the usage
2333     * bit for all held locks. (disabled hardirqs prevented
2334     * this bit from being set before)
2335     */
2336    if (curr->softirqs_enabled)
2337        if (!mark_held_locks(curr, SOFTIRQ))
2338            return;
2339
2340    curr->hardirq_enable_ip = ip;
2341    curr->hardirq_enable_event = ++curr->irq_events;
2342    debug_atomic_inc(&hardirqs_on_events);
2343}
2344EXPORT_SYMBOL(trace_hardirqs_on_caller);
2345
2346void trace_hardirqs_on(void)
2347{
2348    trace_hardirqs_on_caller(CALLER_ADDR0);
2349}
2350EXPORT_SYMBOL(trace_hardirqs_on);
2351
2352/*
2353 * Hardirqs were disabled:
2354 */
2355void trace_hardirqs_off_caller(unsigned long ip)
2356{
2357    struct task_struct *curr = current;
2358
2359    time_hardirqs_off(CALLER_ADDR0, ip);
2360
2361    if (unlikely(!debug_locks || current->lockdep_recursion))
2362        return;
2363
2364    if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2365        return;
2366
2367    if (curr->hardirqs_enabled) {
2368        /*
2369         * We have done an ON -> OFF transition:
2370         */
2371        curr->hardirqs_enabled = 0;
2372        curr->hardirq_disable_ip = ip;
2373        curr->hardirq_disable_event = ++curr->irq_events;
2374        debug_atomic_inc(&hardirqs_off_events);
2375    } else
2376        debug_atomic_inc(&redundant_hardirqs_off);
2377}
2378EXPORT_SYMBOL(trace_hardirqs_off_caller);
2379
2380void trace_hardirqs_off(void)
2381{
2382    trace_hardirqs_off_caller(CALLER_ADDR0);
2383}
2384EXPORT_SYMBOL(trace_hardirqs_off);
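
/*
 * These hooks are not usually called directly; under CONFIG_TRACE_IRQFLAGS
 * the generic irq-flags wrappers invoke them, roughly like this simplified
 * sketch of include/linux/irqflags.h:
 *
 *	#define local_irq_enable() \
 *		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
 *
 *	#define local_irq_disable() \
 *		do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
 *
 * i.e. the "on" event is recorded before interrupts are actually enabled,
 * and the "off" event after they have been disabled.
 */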
2385
2386/*
2387 * Softirqs will be enabled:
2388 */
2389void trace_softirqs_on(unsigned long ip)
2390{
2391    struct task_struct *curr = current;
2392
2393    if (unlikely(!debug_locks))
2394        return;
2395
2396    if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2397        return;
2398
2399    if (curr->softirqs_enabled) {
2400        debug_atomic_inc(&redundant_softirqs_on);
2401        return;
2402    }
2403
2404    /*
2405     * We'll do an OFF -> ON transition:
2406     */
2407    curr->softirqs_enabled = 1;
2408    curr->softirq_enable_ip = ip;
2409    curr->softirq_enable_event = ++curr->irq_events;
2410    debug_atomic_inc(&softirqs_on_events);
2411    /*
2412     * We are going to turn softirqs on, so set the
2413     * usage bit for all held locks, if hardirqs are
2414     * enabled too:
2415     */
2416    if (curr->hardirqs_enabled)
2417        mark_held_locks(curr, SOFTIRQ);
2418}
2419
2420/*
2421 * Softirqs were disabled:
2422 */
2423void trace_softirqs_off(unsigned long ip)
2424{
2425    struct task_struct *curr = current;
2426
2427    if (unlikely(!debug_locks))
2428        return;
2429
2430    if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2431        return;
2432
2433    if (curr->softirqs_enabled) {
2434        /*
2435         * We have done an ON -> OFF transition:
2436         */
2437        curr->softirqs_enabled = 0;
2438        curr->softirq_disable_ip = ip;
2439        curr->softirq_disable_event = ++curr->irq_events;
2440        debug_atomic_inc(&softirqs_off_events);
2441        DEBUG_LOCKS_WARN_ON(!softirq_count());
2442    } else
2443        debug_atomic_inc(&redundant_softirqs_off);
2444}
2445
2446static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
2447{
2448    struct task_struct *curr = current;
2449
2450    if (unlikely(!debug_locks))
2451        return;
2452
2453    /* no reclaim without waiting on it */
2454    if (!(gfp_mask & __GFP_WAIT))
2455        return;
2456
2457    /* this guy won't enter reclaim */
2458    if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2459        return;
2460
2461    /* We're only interested in __GFP_FS allocations for now */
2462    if (!(gfp_mask & __GFP_FS))
2463        return;
2464
2465    if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
2466        return;
2467
2468    mark_held_locks(curr, RECLAIM_FS);
2469}
2470
2471static void check_flags(unsigned long flags);
2472
2473void lockdep_trace_alloc(gfp_t gfp_mask)
2474{
2475    unsigned long flags;
2476
2477    if (unlikely(current->lockdep_recursion))
2478        return;
2479
2480    raw_local_irq_save(flags);
2481    check_flags(flags);
2482    current->lockdep_recursion = 1;
2483    __lockdep_trace_alloc(gfp_mask, flags);
2484    current->lockdep_recursion = 0;
2485    raw_local_irq_restore(flags);
2486}
2487
2488static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2489{
2490    /*
2491     * If this is a non-trylock acquisition in a hardirq or softirq
2492     * context, then mark the lock as used in these contexts:
2493     */
2494    if (!hlock->trylock) {
2495        if (hlock->read) {
2496            if (curr->hardirq_context)
2497                if (!mark_lock(curr, hlock,
2498                        LOCK_USED_IN_HARDIRQ_READ))
2499                    return 0;
2500            if (curr->softirq_context)
2501                if (!mark_lock(curr, hlock,
2502                        LOCK_USED_IN_SOFTIRQ_READ))
2503                    return 0;
2504        } else {
2505            if (curr->hardirq_context)
2506                if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2507                    return 0;
2508            if (curr->softirq_context)
2509                if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2510                    return 0;
2511        }
2512    }
2513    if (!hlock->hardirqs_off) {
2514        if (hlock->read) {
2515            if (!mark_lock(curr, hlock,
2516                    LOCK_ENABLED_HARDIRQ_READ))
2517                return 0;
2518            if (curr->softirqs_enabled)
2519                if (!mark_lock(curr, hlock,
2520                        LOCK_ENABLED_SOFTIRQ_READ))
2521                    return 0;
2522        } else {
2523            if (!mark_lock(curr, hlock,
2524                    LOCK_ENABLED_HARDIRQ))
2525                return 0;
2526            if (curr->softirqs_enabled)
2527                if (!mark_lock(curr, hlock,
2528                        LOCK_ENABLED_SOFTIRQ))
2529                    return 0;
2530        }
2531    }
2532
2533    /*
2534     * We reuse the irq-context infrastructure more broadly, as general
2535     * context-checking code. Here it tests for GFP_FS recursion: a lock
2536     * taken during reclaim for a GFP_FS allocation being held while
2537     * performing another GFP_FS allocation.
2538     */
2539    if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
2540        if (hlock->read) {
2541            if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
2542                    return 0;
2543        } else {
2544            if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
2545                    return 0;
2546        }
2547    }
2548
2549    return 1;
2550}
2551
2552static int separate_irq_context(struct task_struct *curr,
2553        struct held_lock *hlock)
2554{
2555    unsigned int depth = curr->lockdep_depth;
2556
2557    /*
2558     * Keep track of points where we cross into an interrupt context:
2559     */
2560    hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2561                curr->softirq_context;
2562    if (depth) {
2563        struct held_lock *prev_hlock;
2564
2565        prev_hlock = curr->held_locks + depth-1;
2566        /*
2567         * If we cross into another context, reset the
2568         * hash key (this also prevents the checking and the
2569         * adding of the dependency to 'prev'):
2570         */
2571        if (prev_hlock->irq_context != hlock->irq_context)
2572            return 1;
2573    }
2574    return 0;
2575}
2576
2577#else
2578
2579static inline
2580int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2581        enum lock_usage_bit new_bit)
2582{
2583    WARN_ON(1);
2584    return 1;
2585}
2586
2587static inline int mark_irqflags(struct task_struct *curr,
2588        struct held_lock *hlock)
2589{
2590    return 1;
2591}
2592
2593static inline int separate_irq_context(struct task_struct *curr,
2594        struct held_lock *hlock)
2595{
2596    return 0;
2597}
2598
2599void lockdep_trace_alloc(gfp_t gfp_mask)
2600{
2601}
2602
2603#endif
2604
2605/*
2606 * Mark a lock with a usage bit, and validate the state transition:
2607 */
2608static int mark_lock(struct task_struct *curr, struct held_lock *this,
2609                 enum lock_usage_bit new_bit)
2610{
2611    unsigned int new_mask = 1 << new_bit, ret = 1;
2612
2613    /*
2614     * If already set then do not dirty the cacheline,
2615     * nor do any checks:
2616     */
2617    if (likely(hlock_class(this)->usage_mask & new_mask))
2618        return 1;
2619
2620    if (!graph_lock())
2621        return 0;
2622    /*
2623     * Make sure we didn't race:
2624     */
2625    if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
2626        graph_unlock();
2627        return 1;
2628    }
2629
2630    hlock_class(this)->usage_mask |= new_mask;
2631
2632    if (!save_trace(hlock_class(this)->usage_traces + new_bit))
2633        return 0;
2634
2635    switch (new_bit) {
2636#define LOCKDEP_STATE(__STATE) \
2637    case LOCK_USED_IN_##__STATE: \
2638    case LOCK_USED_IN_##__STATE##_READ: \
2639    case LOCK_ENABLED_##__STATE: \
2640    case LOCK_ENABLED_##__STATE##_READ:
2641#include "lockdep_states.h"
2642#undef LOCKDEP_STATE
2643        ret = mark_lock_irq(curr, this, new_bit);
2644        if (!ret)
2645            return 0;
2646        break;
2647    case LOCK_USED:
2648        debug_atomic_dec(&nr_unused_locks);
2649        break;
2650    default:
2651        if (!debug_locks_off_graph_unlock())
2652            return 0;
2653        WARN_ON(1);
2654        return 0;
2655    }
2656
2657    graph_unlock();
2658
2659    /*
2660     * We must printk outside of the graph_lock:
2661     */
2662    if (ret == 2) {
2663        printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
2664        print_lock(this);
2665        print_irqtrace_events(curr);
2666        dump_stack();
2667    }
2668
2669    return ret;
2670}
2671
2672/*
2673 * Initialize a lock instance's lock-class mapping info:
2674 */
2675void lockdep_init_map(struct lockdep_map *lock, const char *name,
2676              struct lock_class_key *key, int subclass)
2677{
2678    lock->class_cache = NULL;
2679#ifdef CONFIG_LOCK_STAT
2680    lock->cpu = raw_smp_processor_id();
2681#endif
2682
2683    if (DEBUG_LOCKS_WARN_ON(!name)) {
2684        lock->name = "NULL";
2685        return;
2686    }
2687
2688    lock->name = name;
2689
2690    if (DEBUG_LOCKS_WARN_ON(!key))
2691        return;
2692    /*
2693     * Sanity check: the lock-class key must be persistent:
2694     */
2695    if (!static_obj(key)) {
2696        printk("BUG: key %p not in .data!\n", key);
2697        DEBUG_LOCKS_WARN_ON(1);
2698        return;
2699    }
2700    lock->key = key;
2701
2702    if (unlikely(!debug_locks))
2703        return;
2704
2705    if (subclass)
2706        register_lock_class(lock, subclass, 1);
2707}
2708EXPORT_SYMBOL_GPL(lockdep_init_map);
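
/*
 * A minimal usage sketch (the names below are illustrative, not taken from
 * this file): a locking primitive that wants lockdep coverage embeds a
 * lockdep_map and registers it with a static, per-initialization-site key,
 * which is the same pattern the standard lock-init style macros use:
 *
 *	struct my_lock {
 *		...					raw lock state
 *		struct lockdep_map	dep_map;
 *	};
 *
 *	#define my_lock_init(l)					\
 *	do {							\
 *		static struct lock_class_key __key;		\
 *		lockdep_init_map(&(l)->dep_map, #l, &__key, 0);	\
 *	} while (0)
 *
 * The static __key makes all locks initialized at this site share one
 * lock class.
 */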
2709
2710/*
2711 * This gets called for every mutex_lock*()/spin_lock*() operation.
2712 * We maintain the dependency maps and validate the locking attempt:
2713 */
2714static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2715              int trylock, int read, int check, int hardirqs_off,
2716              struct lockdep_map *nest_lock, unsigned long ip,
2717              int references)
2718{
2719    struct task_struct *curr = current;
2720    struct lock_class *class = NULL;
2721    struct held_lock *hlock;
2722    unsigned int depth, id;
2723    int chain_head = 0;
2724    int class_idx;
2725    u64 chain_key;
2726
2727    if (!prove_locking)
2728        check = 1;
2729
2730    if (unlikely(!debug_locks))
2731        return 0;
2732
2733    if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2734        return 0;
2735
2736    if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
2737        debug_locks_off();
2738        printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
2739        printk("turning off the locking correctness validator.\n");
2740        dump_stack();
2741        return 0;
2742    }
2743
2744    if (!subclass)
2745        class = lock->class_cache;
2746    /*
2747     * Not cached yet or subclass?
2748     */
2749    if (unlikely(!class)) {
2750        class = register_lock_class(lock, subclass, 0);
2751        if (!class)
2752            return 0;
2753    }
2754    debug_atomic_inc((atomic_t *)&class->ops);
2755    if (very_verbose(class)) {
2756        printk("\nacquire class [%p] %s", class->key, class->name);
2757        if (class->name_version > 1)
2758            printk("#%d", class->name_version);
2759        printk("\n");
2760        dump_stack();
2761    }
2762
2763    /*
2764     * Add the lock to the list of currently held locks.
2765     * (we don't increase the depth just yet, up until the
2766     * dependency checks are done)
2767     */
2768    depth = curr->lockdep_depth;
2769    if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2770        return 0;
2771
2772    class_idx = class - lock_classes + 1;
2773
2774    if (depth) {
2775        hlock = curr->held_locks + depth - 1;
2776        if (hlock->class_idx == class_idx && nest_lock) {
2777            if (hlock->references)
2778                hlock->references++;
2779            else
2780                hlock->references = 2;
2781
2782            return 1;
2783        }
2784    }
2785
2786    hlock = curr->held_locks + depth;
2787    if (DEBUG_LOCKS_WARN_ON(!class))
2788        return 0;
2789    hlock->class_idx = class_idx;
2790    hlock->acquire_ip = ip;
2791    hlock->instance = lock;
2792    hlock->nest_lock = nest_lock;
2793    hlock->trylock = trylock;
2794    hlock->read = read;
2795    hlock->check = check;
2796    hlock->hardirqs_off = !!hardirqs_off;
2797    hlock->references = references;
2798#ifdef CONFIG_LOCK_STAT
2799    hlock->waittime_stamp = 0;
2800    hlock->holdtime_stamp = lockstat_clock();
2801#endif
2802
2803    if (check == 2 && !mark_irqflags(curr, hlock))
2804        return 0;
2805
2806    /* mark it as used: */
2807    if (!mark_lock(curr, hlock, LOCK_USED))
2808        return 0;
2809
2810    /*
2811     * Calculate the chain hash: it's the combined hash of all the
2812     * lock keys along the dependency chain. We save the hash value
2813     * at every step so that we can get the current hash easily
2814     * after unlock. The chain hash is then used to cache dependency
2815     * results.
2816     *
2817     * The 'key ID' (the class index) is the most compact key value we
2818     * can use to drive the hash, rather than class->key itself.
2819     */
2820    id = class - lock_classes;
2821    if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2822        return 0;
2823
2824    chain_key = curr->curr_chain_key;
2825    if (!depth) {
2826        if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
2827            return 0;
2828        chain_head = 1;
2829    }
2830
2831    hlock->prev_chain_key = chain_key;
2832    if (separate_irq_context(curr, hlock)) {
2833        chain_key = 0;
2834        chain_head = 1;
2835    }
2836    chain_key = iterate_chain_key(chain_key, id);
2837
2838    if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
2839        return 0;
2840
2841    curr->curr_chain_key = chain_key;
2842    curr->lockdep_depth++;
2843    check_chain_key(curr);
2844#ifdef CONFIG_DEBUG_LOCKDEP
2845    if (unlikely(!debug_locks))
2846        return 0;
2847#endif
2848    if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
2849        debug_locks_off();
2850        printk("BUG: MAX_LOCK_DEPTH too low!\n");
2851        printk("turning off the locking correctness validator.\n");
2852        dump_stack();
2853        return 0;
2854    }
2855
2856    if (unlikely(curr->lockdep_depth > max_lockdep_depth))
2857        max_lockdep_depth = curr->lockdep_depth;
2858
2859    return 1;
2860}
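
/*
 * A small worked example of the incremental chain key computed above:
 * taking lock A (key id idA) with no locks held, then lock B (key id idB),
 * yields
 *
 *	chain_key(A)   = iterate_chain_key(0, idA);
 *	chain_key(A,B) = iterate_chain_key(chain_key(A), idB);
 *
 * Each held_lock saves the value that was current before it was taken in
 * hlock->prev_chain_key, so releasing B simply restores chain_key(A).
 */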
2861
2862static int
2863print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
2864               unsigned long ip)
2865{
2866    if (!debug_locks_off())
2867        return 0;
2868    if (debug_locks_silent)
2869        return 0;
2870
2871    printk("\n=====================================\n");
2872    printk( "[ BUG: bad unlock balance detected! ]\n");
2873    printk( "-------------------------------------\n");
2874    printk("%s/%d is trying to release lock (",
2875        curr->comm, task_pid_nr(curr));
2876    print_lockdep_cache(lock);
2877    printk(") at:\n");
2878    print_ip_sym(ip);
2879    printk("but there are no more locks to release!\n");
2880    printk("\nother info that might help us debug this:\n");
2881    lockdep_print_held_locks(curr);
2882
2883    printk("\nstack backtrace:\n");
2884    dump_stack();
2885
2886    return 0;
2887}
2888
2889/*
2890 * Common debugging checks for both nested and non-nested unlock:
2891 */
2892static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2893            unsigned long ip)
2894{
2895    if (unlikely(!debug_locks))
2896        return 0;
2897    if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2898        return 0;
2899
2900    if (curr->lockdep_depth <= 0)
2901        return print_unlock_inbalance_bug(curr, lock, ip);
2902
2903    return 1;
2904}
2905
2906static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
2907{
2908    if (hlock->instance == lock)
2909        return 1;
2910
2911    if (hlock->references) {
2912        struct lock_class *class = lock->class_cache;
2913
2914        if (!class)
2915            class = look_up_lock_class(lock, 0);
2916
2917        if (DEBUG_LOCKS_WARN_ON(!class))
2918            return 0;
2919
2920        if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
2921            return 0;
2922
2923        if (hlock->class_idx == class - lock_classes + 1)
2924            return 1;
2925    }
2926
2927    return 0;
2928}
2929
2930static int
2931__lock_set_class(struct lockdep_map *lock, const char *name,
2932         struct lock_class_key *key, unsigned int subclass,
2933         unsigned long ip)
2934{
2935    struct task_struct *curr = current;
2936    struct held_lock *hlock, *prev_hlock;
2937    struct lock_class *class;
2938    unsigned int depth;
2939    int i;
2940
2941    depth = curr->lockdep_depth;
2942    if (DEBUG_LOCKS_WARN_ON(!depth))
2943        return 0;
2944
2945    prev_hlock = NULL;
2946    for (i = depth-1; i >= 0; i--) {
2947        hlock = curr->held_locks + i;
2948        /*
2949         * We must not cross into another context:
2950         */
2951        if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2952            break;
2953        if (match_held_lock(hlock, lock))
2954            goto found_it;
2955        prev_hlock = hlock;
2956    }
2957    return print_unlock_inbalance_bug(curr, lock, ip);
2958
2959found_it:
2960    lockdep_init_map(lock, name, key, 0);
2961    class = register_lock_class(lock, subclass, 0);
2962    hlock->class_idx = class - lock_classes + 1;
2963
2964    curr->lockdep_depth = i;
2965    curr->curr_chain_key = hlock->prev_chain_key;
2966
2967    for (; i < depth; i++) {
2968        hlock = curr->held_locks + i;
2969        if (!__lock_acquire(hlock->instance,
2970            hlock_class(hlock)->subclass, hlock->trylock,
2971                hlock->read, hlock->check, hlock->hardirqs_off,
2972                hlock->nest_lock, hlock->acquire_ip,
2973                hlock->references))
2974            return 0;
2975    }
2976
2977    if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
2978        return 0;
2979    return 1;
2980}
2981
2982/*
2983 * Remove the lock from the list of currently held locks in a
2984 * potentially non-nested (out of order) manner. This is a
2985 * relatively rare operation, as all the unlock APIs default
2986 * to nested mode (which uses lock_release()):
2987 */
2988static int
2989lock_release_non_nested(struct task_struct *curr,
2990            struct lockdep_map *lock, unsigned long ip)
2991{
2992    struct held_lock *hlock, *prev_hlock;
2993    unsigned int depth;
2994    int i;
2995
2996    /*
2997     * Check whether the lock exists in the current stack
2998     * of held locks:
2999     */
3000    depth = curr->lockdep_depth;
3001    if (DEBUG_LOCKS_WARN_ON(!depth))
3002        return 0;
3003
3004    prev_hlock = NULL;
3005    for (i = depth-1; i >= 0; i--) {
3006        hlock = curr->held_locks + i;
3007        /*
3008         * We must not cross into another context:
3009         */
3010        if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3011            break;
3012        if (match_held_lock(hlock, lock))
3013            goto found_it;
3014        prev_hlock = hlock;
3015    }
3016    return print_unlock_inbalance_bug(curr, lock, ip);
3017
3018found_it:
3019    if (hlock->instance == lock)
3020        lock_release_holdtime(hlock);
3021
3022    if (hlock->references) {
3023        hlock->references--;
3024        if (hlock->references) {
3025            /*
3026             * We had multiple references and, after removing
3027             * one, some still remain - so the current lock
3028             * stack is still valid. We're done!
3029             */
3030            return 1;
3031        }
3032    }
3033
3034    /*
3035     * We have the right lock to unlock, 'hlock' points to it.
3036     * Now we remove it from the stack, and add back the other
3037     * entries (if any), recalculating the hash along the way:
3038     */
3039
3040    curr->lockdep_depth = i;
3041    curr->curr_chain_key = hlock->prev_chain_key;
3042
3043    for (i++; i < depth; i++) {
3044        hlock = curr->held_locks + i;
3045        if (!__lock_acquire(hlock->instance,
3046            hlock_class(hlock)->subclass, hlock->trylock,
3047                hlock->read, hlock->check, hlock->hardirqs_off,
3048                hlock->nest_lock, hlock->acquire_ip,
3049                hlock->references))
3050            return 0;
3051    }
3052
3053    if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
3054        return 0;
3055    return 1;
3056}
3057
3058/*
3059 * Remove the lock from the list of currently held locks - this gets
3060 * called on mutex_unlock()/spin_unlock*() (or on a failed
3061 * mutex_lock_interruptible()). This is done for unlocks that nest
3062 * perfectly. (i.e. the current top of the lock-stack is unlocked)
3063 */
3064static int lock_release_nested(struct task_struct *curr,
3065                   struct lockdep_map *lock, unsigned long ip)
3066{
3067    struct held_lock *hlock;
3068    unsigned int depth;
3069
3070    /*
3071     * Pop off the top of the lock stack:
3072     */
3073    depth = curr->lockdep_depth - 1;
3074    hlock = curr->held_locks + depth;
3075
3076    /*
3077     * Is the unlock non-nested:
3078     */
3079    if (hlock->instance != lock || hlock->references)
3080        return lock_release_non_nested(curr, lock, ip);
3081    curr->lockdep_depth--;
3082
3083    if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
3084        return 0;
3085
3086    curr->curr_chain_key = hlock->prev_chain_key;
3087
3088    lock_release_holdtime(hlock);
3089
3090#ifdef CONFIG_DEBUG_LOCKDEP
3091    hlock->prev_chain_key = 0;
3092    hlock->class_idx = 0;
3093    hlock->acquire_ip = 0;
3094    hlock->irq_context = 0;
3095#endif
3096    return 1;
3097}
3098
3099/*
3100 * Remove the lock from the list of currently held locks - this gets
3101 * called on mutex_unlock()/spin_unlock*() (or on a failed
3102 * mutex_lock_interruptible()). It handles both the common, perfectly
3103 * nested case and the rarer non-nested (out of order) unlock.
3104 */
3105static void
3106__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3107{
3108    struct task_struct *curr = current;
3109
3110    if (!check_unlock(curr, lock, ip))
3111        return;
3112
3113    if (nested) {
3114        if (!lock_release_nested(curr, lock, ip))
3115            return;
3116    } else {
3117        if (!lock_release_non_nested(curr, lock, ip))
3118            return;
3119    }
3120
3121    check_chain_key(curr);
3122}
3123
3124static int __lock_is_held(struct lockdep_map *lock)
3125{
3126    struct task_struct *curr = current;
3127    int i;
3128
3129    for (i = 0; i < curr->lockdep_depth; i++) {
3130        struct held_lock *hlock = curr->held_locks + i;
3131
3132        if (match_held_lock(hlock, lock))
3133            return 1;
3134    }
3135
3136    return 0;
3137}
3138
3139/*
3140 * Check whether we follow the irq-flags state precisely:
3141 */
3142static void check_flags(unsigned long flags)
3143{
3144#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
3145    defined(CONFIG_TRACE_IRQFLAGS)
3146    if (!debug_locks)
3147        return;
3148
3149    if (irqs_disabled_flags(flags)) {
3150        if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
3151            printk("possible reason: unannotated irqs-off.\n");
3152        }
3153    } else {
3154        if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
3155            printk("possible reason: unannotated irqs-on.\n");
3156        }
3157    }
3158
3159    /*
3160     * We don't accurately track softirq state in e.g.
3161     * hardirq contexts (such as on 4KSTACKS), so only
3162     * check if not in hardirq contexts:
3163     */
3164    if (!hardirq_count()) {
3165        if (softirq_count())
3166            DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
3167        else
3168            DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
3169    }
3170
3171    if (!debug_locks)
3172        print_irqtrace_events(current);
3173#endif
3174}
3175
3176void lock_set_class(struct lockdep_map *lock, const char *name,
3177            struct lock_class_key *key, unsigned int subclass,
3178            unsigned long ip)
3179{
3180    unsigned long flags;
3181
3182    if (unlikely(current->lockdep_recursion))
3183        return;
3184
3185    raw_local_irq_save(flags);
3186    current->lockdep_recursion = 1;
3187    check_flags(flags);
3188    if (__lock_set_class(lock, name, key, subclass, ip))
3189        check_chain_key(current);
3190    current->lockdep_recursion = 0;
3191    raw_local_irq_restore(flags);
3192}
3193EXPORT_SYMBOL_GPL(lock_set_class);
3194
3195/*
3196 * We are not always called with irqs disabled - do that here,
3197 * and also avoid lockdep recursion:
3198 */
3199void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3200              int trylock, int read, int check,
3201              struct lockdep_map *nest_lock, unsigned long ip)
3202{
3203    unsigned long flags;
3204
3205    trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
3206
3207    if (unlikely(current->lockdep_recursion))
3208        return;
3209
3210    raw_local_irq_save(flags);
3211    check_flags(flags);
3212
3213    current->lockdep_recursion = 1;
3214    __lock_acquire(lock, subclass, trylock, read, check,
3215               irqs_disabled_flags(flags), nest_lock, ip, 0);
3216    current->lockdep_recursion = 0;
3217    raw_local_irq_restore(flags);
3218}
3219EXPORT_SYMBOL_GPL(lock_acquire);
3220
3221void lock_release(struct lockdep_map *lock, int nested,
3222              unsigned long ip)
3223{
3224    unsigned long flags;
3225
3226    trace_lock_release(lock, nested, ip);
3227
3228    if (unlikely(current->lockdep_recursion))
3229        return;
3230
3231    raw_local_irq_save(flags);
3232    check_flags(flags);
3233    current->lockdep_recursion = 1;
3234    __lock_release(lock, nested, ip);
3235    current->lockdep_recursion = 0;
3236    raw_local_irq_restore(flags);
3237}
3238EXPORT_SYMBOL_GPL(lock_release);
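
/*
 * A rough sketch of how the lock implementations reach lock_acquire() and
 * lock_release(): they do not call them directly but go through the
 * wrapper macros in <linux/lockdep.h> (spin_acquire(), spin_release(),
 * mutex_acquire(), ...), approximately:
 *
 *	spin_lock(lock):
 *		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		... actually take the lock ...
 *
 *	spin_unlock(lock):
 *		spin_release(&lock->dep_map, 1, _RET_IP_);
 *		... actually drop the lock ...
 *
 * The exact macro arguments (subclass, trylock, nested) vary per primitive;
 * this only shows where these entry points are hooked.
 */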
3239
3240int lock_is_held(struct lockdep_map *lock)
3241{
3242    unsigned long flags;
3243    int ret = 0;
3244
3245    if (unlikely(current->lockdep_recursion))
3246        return ret;
3247
3248    raw_local_irq_save(flags);
3249    check_flags(flags);
3250
3251    current->lockdep_recursion = 1;
3252    ret = __lock_is_held(lock);
3253    current->lockdep_recursion = 0;
3254    raw_local_irq_restore(flags);
3255
3256    return ret;
3257}
3258EXPORT_SYMBOL_GPL(lock_is_held);
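
/*
 * A minimal sketch of how a subsystem can use lock_is_held() to assert
 * lock ownership on a debug kernel (the surrounding WARN_ON() usage here
 * is illustrative, not defined in this file):
 *
 *	WARN_ON(debug_locks && !lock_is_held(&mylock.dep_map));
 *
 * The debug_locks check avoids false warnings once lockdep has been
 * turned off after an earlier report.
 */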
3259
3260void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
3261{
3262    current->lockdep_reclaim_gfp = gfp_mask;
3263}
3264
3265void lockdep_clear_current_reclaim_state(void)
3266{
3267    current->lockdep_reclaim_gfp = 0;
3268}
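
/*
 * A short sketch of the intended use of the two helpers above: a reclaim
 * thread brackets its work with them so that RECLAIM_FS dependencies get
 * recorded (kswapd in mm/vmscan.c follows this pattern):
 *
 *	lockdep_set_current_reclaim_state(GFP_KERNEL);
 *	... perform reclaim, possibly taking filesystem locks ...
 *	lockdep_clear_current_reclaim_state();
 */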
3269
3270#ifdef CONFIG_LOCK_STAT
3271static int
3272print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
3273               unsigned long ip)
3274{
3275    if (!debug_locks_off())
3276        return 0;
3277    if (debug_locks_silent)
3278        return 0;
3279
3280    printk("\n=================================\n");
3281    printk( "[ BUG: bad contention detected! ]\n");
3282    printk( "---------------------------------\n");
3283    printk("%s/%d is trying to contend lock (",
3284        curr->comm, task_pid_nr(curr));
3285    print_lockdep_cache(lock);
3286    printk(") at:\n");
3287    print_ip_sym(ip);
3288    printk("but there are no locks held!\n");
3289    printk("\nother info that might help us debug this:\n");
3290    lockdep_print_held_locks(curr);
3291
3292    printk("\nstack backtrace:\n");
3293    dump_stack();
3294
3295    return 0;
3296}
3297
3298static void
3299__lock_contended(struct lockdep_map *lock, unsigned long ip)
3300{
3301    struct task_struct *curr = current;
3302    struct held_lock *hlock, *prev_hlock;
3303    struct lock_class_stats *stats;
3304    unsigned int depth;
3305    int i, contention_point, contending_point;
3306
3307    depth = curr->lockdep_depth;
3308    if (DEBUG_LOCKS_WARN_ON(!depth))
3309        return;
3310
3311    prev_hlock = NULL;
3312    for (i = depth-1; i >= 0; i--) {
3313        hlock = curr->held_locks + i;
3314        /*
3315         * We must not cross into another context:
3316         */
3317        if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3318            break;
3319        if (match_held_lock(hlock, lock))
3320            goto found_it;
3321        prev_hlock = hlock;
3322    }
3323    print_lock_contention_bug(curr, lock, ip);
3324    return;
3325
3326found_it:
3327    if (hlock->instance != lock)
3328        return;
3329
3330    hlock->waittime_stamp = lockstat_clock();
3331
3332    contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3333    contending_point = lock_point(hlock_class(hlock)->contending_point,
3334                      lock->ip);
3335
3336    stats = get_lock_stats(hlock_class(hlock));
3337    if (contention_point < LOCKSTAT_POINTS)
3338        stats->contention_point[contention_point]++;
3339    if (contending_point < LOCKSTAT_POINTS)
3340        stats->contending_point[contending_point]++;
3341    if (lock->cpu != smp_processor_id())
3342        stats->bounces[bounce_contended + !!hlock->read]++;
3343    put_lock_stats(stats);
3344}
3345
3346static void
3347__lock_acquired(struct lockdep_map *lock, unsigned long ip)
3348{
3349    struct task_struct *curr = current;
3350    struct held_lock *hlock, *prev_hlock;
3351    struct lock_class_stats *stats;
3352    unsigned int depth;
3353    u64 now, waittime = 0;
3354    int i, cpu;
3355
3356    depth = curr->lockdep_depth;
3357    if (DEBUG_LOCKS_WARN_ON(!depth))
3358        return;
3359
3360    prev_hlock = NULL;
3361    for (i = depth-1; i >= 0; i--) {
3362        hlock = curr->held_locks + i;
3363        /*
3364         * We must not cross into another context:
3365         */
3366        if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3367            break;
3368        if (match_held_lock(hlock, lock))
3369            goto found_it;
3370        prev_hlock = hlock;
3371    }
3372    print_lock_contention_bug(curr, lock, _RET_IP_);
3373    return;
3374
3375found_it:
3376    if (hlock->instance != lock)
3377        return;
3378
3379    cpu = smp_processor_id();
3380    if (hlock->waittime_stamp) {
3381        now = lockstat_clock();
3382        waittime = now - hlock->waittime_stamp;
3383        hlock->holdtime_stamp = now;
3384    }
3385
3386    trace_lock_acquired(lock, ip, waittime);
3387
3388    stats = get_lock_stats(hlock_class(hlock));
3389    if (waittime) {
3390        if (hlock->read)
3391            lock_time_inc(&stats->read_waittime, waittime);
3392        else
3393            lock_time_inc(&stats->write_waittime, waittime);
3394    }
3395    if (lock->cpu != cpu)
3396        stats->bounces[bounce_acquired + !!hlock->read]++;
3397    put_lock_stats(stats);
3398
3399    lock->cpu = cpu;
3400    lock->ip = ip;
3401}
3402
3403void lock_contended(struct lockdep_map *lock, unsigned long ip)
3404{
3405    unsigned long flags;
3406
3407    trace_lock_contended(lock, ip);
3408
3409    if (unlikely(!lock_stat))
3410        return;
3411
3412    if (unlikely(current->lockdep_recursion))
3413        return;
3414
3415    raw_local_irq_save(flags);
3416    check_flags(flags);
3417    current->lockdep_recursion = 1;
3418    __lock_contended(lock, ip);
3419    current->lockdep_recursion = 0;
3420    raw_local_irq_restore(flags);
3421}
3422EXPORT_SYMBOL_GPL(lock_contended);
3423
3424void lock_acquired(struct lockdep_map *lock, unsigned long ip)
3425{
3426    unsigned long flags;
3427
3428    if (unlikely(!lock_stat))
3429        return;
3430
3431    if (unlikely(current->lockdep_recursion))
3432        return;
3433
3434    raw_local_irq_save(flags);
3435    check_flags(flags);
3436    current->lockdep_recursion = 1;
3437    __lock_acquired(lock, ip);
3438    current->lockdep_recursion = 0;
3439    raw_local_irq_restore(flags);
3440}
3441EXPORT_SYMBOL_GPL(lock_acquired);
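
/*
 * A simplified sketch of how a sleeping-lock slowpath feeds the two hooks
 * above, modelled on the LOCK_CONTENDED() helper in <linux/lockdep.h>
 * (try_to_take()/wait_for_lock() are placeholders):
 *
 *	if (!try_to_take(lock)) {
 *		lock_contended(&lock->dep_map, _RET_IP_);    start of the wait
 *		wait_for_lock(lock);
 *	}
 *	lock_acquired(&lock->dep_map, _RET_IP_);             wait time + bounces
 *
 * lock_contended() stamps waittime_stamp; lock_acquired() turns that stamp
 * into the per-class wait-time statistics recorded above.
 */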
3442#endif
3443
3444/*
3445 * Used by the testsuite, sanitize the validator state
3446 * after a simulated failure:
3447 */
3448
3449void lockdep_reset(void)
3450{
3451    unsigned long flags;
3452    int i;
3453
3454    raw_local_irq_save(flags);
3455    current->curr_chain_key = 0;
3456    current->lockdep_depth = 0;
3457    current->lockdep_recursion = 0;
3458    memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
3459    nr_hardirq_chains = 0;
3460    nr_softirq_chains = 0;
3461    nr_process_chains = 0;
3462    debug_locks = 1;
3463    for (i = 0; i < CHAINHASH_SIZE; i++)
3464        INIT_LIST_HEAD(chainhash_table + i);
3465    raw_local_irq_restore(flags);
3466}
3467
3468static void zap_class(struct lock_class *class)
3469{
3470    int i;
3471
3472    /*
3473     * Remove all dependencies this lock is
3474     * involved in:
3475     */
3476    for (i = 0; i < nr_list_entries; i++) {
3477        if (list_entries[i].class == class)
3478            list_del_rcu(&list_entries[i].entry);
3479    }
3480    /*
3481     * Unhash the class and remove it from the all_lock_classes list:
3482     */
3483    list_del_rcu(&class->hash_entry);
3484    list_del_rcu(&class->lock_entry);
3485
3486    class->key = NULL;
3487}
3488
3489static inline int within(const void *addr, void *start, unsigned long size)
3490{
3491    return addr >= start && addr < start + size;
3492}
3493
3494void lockdep_free_key_range(void *start, unsigned long size)
3495{
3496    struct lock_class *class, *next;
3497    struct list_head *head;
3498    unsigned long flags;
3499    int i;
3500    int locked;
3501
3502    raw_local_irq_save(flags);
3503    locked = graph_lock();
3504
3505    /*
3506     * Unhash all classes that were created by this module:
3507     */
3508    for (i = 0; i < CLASSHASH_SIZE; i++) {
3509        head = classhash_table + i;
3510        if (list_empty(head))
3511            continue;
3512        list_for_each_entry_safe(class, next, head, hash_entry) {
3513            if (within(class->key, start, size))
3514                zap_class(class);
3515            else if (within(class->name, start, size))
3516                zap_class(class);
3517        }
3518    }
3519
3520    if (locked)
3521        graph_unlock();
3522    raw_local_irq_restore(flags);
3523}
3524
3525void lockdep_reset_lock(struct lockdep_map *lock)
3526{
3527    struct lock_class *class, *next;
3528    struct list_head *head;
3529    unsigned long flags;
3530    int i, j;
3531    int locked;
3532
3533    raw_local_irq_save(flags);
3534
3535    /*
3536     * Remove all classes this lock might have:
3537     */
3538    for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
3539        /*
3540         * If the class exists we look it up and zap it:
3541         */
3542        class = look_up_lock_class(lock, j);
3543        if (class)
3544            zap_class(class);
3545    }
3546    /*
3547     * Debug check: in the end all mapped classes should
3548     * be gone.
3549     */
3550    locked = graph_lock();
3551    for (i = 0; i < CLASSHASH_SIZE; i++) {
3552        head = classhash_table + i;
3553        if (list_empty(head))
3554            continue;
3555        list_for_each_entry_safe(class, next, head, hash_entry) {
3556            if (unlikely(class == lock->class_cache)) {
3557                if (debug_locks_off_graph_unlock())
3558                    WARN_ON(1);
3559                goto out_restore;
3560            }
3561        }
3562    }
3563    if (locked)
3564        graph_unlock();
3565
3566out_restore:
3567    raw_local_irq_restore(flags);
3568}
3569
3570void lockdep_init(void)
3571{
3572    int i;
3573
3574    /*
3575     * Some architectures have their own start_kernel()
3576     * code which calls lockdep_init(), while we also
3577     * call lockdep_init() from start_kernel() itself,
3578     * and we want to initialize the hashes only once:
3579     */
3580    if (lockdep_initialized)
3581        return;
3582
3583    for (i = 0; i < CLASSHASH_SIZE; i++)
3584        INIT_LIST_HEAD(classhash_table + i);
3585
3586    for (i = 0; i < CHAINHASH_SIZE; i++)
3587        INIT_LIST_HEAD(chainhash_table + i);
3588
3589    lockdep_initialized = 1;
3590}
3591
3592void __init lockdep_info(void)
3593{
3594    printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
3595
3596    printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
3597    printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
3598    printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
3599    printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
3600    printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
3601    printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
3602    printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
3603
3604    printk(" memory used by lock dependency info: %lu kB\n",
3605        (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
3606        sizeof(struct list_head) * CLASSHASH_SIZE +
3607        sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
3608        sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
3609        sizeof(struct list_head) * CHAINHASH_SIZE
3610#ifdef CONFIG_PROVE_LOCKING
3611        + sizeof(struct circular_queue)
3612#endif
3613        ) / 1024
3614        );
3615
3616    printk(" per task-struct memory footprint: %lu bytes\n",
3617        sizeof(struct held_lock) * MAX_LOCK_DEPTH);
3618
3619#ifdef CONFIG_DEBUG_LOCKDEP
3620    if (lockdep_init_error) {
3621        printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
3622        printk("Call stack leading to lockdep invocation was:\n");
3623        print_stack_trace(&lockdep_init_trace, 0);
3624    }
3625#endif
3626}
3627
3628static void
3629print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
3630             const void *mem_to, struct held_lock *hlock)
3631{
3632    if (!debug_locks_off())
3633        return;
3634    if (debug_locks_silent)
3635        return;
3636
3637    printk("\n=========================\n");
3638    printk( "[ BUG: held lock freed! ]\n");
3639    printk( "-------------------------\n");
3640    printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
3641        curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
3642    print_lock(hlock);
3643    lockdep_print_held_locks(curr);
3644
3645    printk("\nstack backtrace:\n");
3646    dump_stack();
3647}
3648
3649static inline int not_in_range(const void* mem_from, unsigned long mem_len,
3650                const void* lock_from, unsigned long lock_len)
3651{
3652    return lock_from + lock_len <= mem_from ||
3653        mem_from + mem_len <= lock_from;
3654}
3655
3656/*
3657 * Called when kernel memory is freed (or unmapped), or if a lock
3658 * is destroyed or reinitialized - this code checks whether there is
3659 * any held lock in the memory range of <from> to <to>:
3660 */
3661void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
3662{
3663    struct task_struct *curr = current;
3664    struct held_lock *hlock;
3665    unsigned long flags;
3666    int i;
3667
3668    if (unlikely(!debug_locks))
3669        return;
3670
3671    local_irq_save(flags);
3672    for (i = 0; i < curr->lockdep_depth; i++) {
3673        hlock = curr->held_locks + i;
3674
3675        if (not_in_range(mem_from, mem_len, hlock->instance,
3676                    sizeof(*hlock->instance)))
3677            continue;
3678
3679        print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
3680        break;
3681    }
3682    local_irq_restore(flags);
3683}
3684EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
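
/*
 * A minimal sketch of the expected caller: memory allocators invoke
 * debug_check_no_locks_freed() on their free paths before recycling the
 * memory (the function name below is illustrative):
 *
 *	void my_free(void *obj, unsigned long size)
 *	{
 *		debug_check_no_locks_freed(obj, size);
 *		... return obj to the free pool ...
 *	}
 */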
3685
3686static void print_held_locks_bug(struct task_struct *curr)
3687{
3688    if (!debug_locks_off())
3689        return;
3690    if (debug_locks_silent)
3691        return;
3692
3693    printk("\n=====================================\n");
3694    printk( "[ BUG: lock held at task exit time! ]\n");
3695    printk( "-------------------------------------\n");
3696    printk("%s/%d is exiting with locks still held!\n",
3697        curr->comm, task_pid_nr(curr));
3698    lockdep_print_held_locks(curr);
3699
3700    printk("\nstack backtrace:\n");
3701    dump_stack();
3702}
3703
3704void debug_check_no_locks_held(struct task_struct *task)
3705{
3706    if (unlikely(task->lockdep_depth > 0))
3707        print_held_locks_bug(task);
3708}
3709
3710void debug_show_all_locks(void)
3711{
3712    struct task_struct *g, *p;
3713    int count = 10;
3714    int unlock = 1;
3715
3716    if (unlikely(!debug_locks)) {
3717        printk("INFO: lockdep is turned off.\n");
3718        return;
3719    }
3720    printk("\nShowing all locks held in the system:\n");
3721
3722    /*
3723     * Here we try to get the tasklist_lock as hard as possible;
3724     * if we are not successful after 2 seconds we ignore it (but keep
3725     * trying). This is to enable a debug printout even if a
3726     * tasklist_lock-holding task deadlocks or crashes.
3727     */
3728retry:
3729    if (!read_trylock(&tasklist_lock)) {
3730        if (count == 10)
3731            printk("hm, tasklist_lock locked, retrying... ");
3732        if (count) {
3733            count--;
3734            printk(" #%d", 10-count);
3735            mdelay(200);
3736            goto retry;
3737        }
3738        printk(" ignoring it.\n");
3739        unlock = 0;
3740    } else {
3741        if (count != 10)
3742            printk(KERN_CONT " locked it.\n");
3743    }
3744
3745    do_each_thread(g, p) {
3746        /*
3747         * It's not reliable to print a task's held locks
3748         * if it's not sleeping (or if it's not the current
3749         * task):
3750         */
3751        if (p->state == TASK_RUNNING && p != current)
3752            continue;
3753        if (p->lockdep_depth)
3754            lockdep_print_held_locks(p);
3755        if (!unlock)
3756            if (read_trylock(&tasklist_lock))
3757                unlock = 1;
3758    } while_each_thread(g, p);
3759
3760    printk("\n");
3761    printk("=============================================\n\n");
3762
3763    if (unlock)
3764        read_unlock(&tasklist_lock);
3765}
3766EXPORT_SYMBOL_GPL(debug_show_all_locks);
3767
3768/*
3769 * Careful: only use this function if you are sure that
3770 * the task cannot run in parallel!
3771 */
3772void __debug_show_held_locks(struct task_struct *task)
3773{
3774    if (unlikely(!debug_locks)) {
3775        printk("INFO: lockdep is turned off.\n");
3776        return;
3777    }
3778    lockdep_print_held_locks(task);
3779}
3780EXPORT_SYMBOL_GPL(__debug_show_held_locks);
3781
3782void debug_show_held_locks(struct task_struct *task)
3783{
3784        __debug_show_held_locks(task);
3785}
3786EXPORT_SYMBOL_GPL(debug_show_held_locks);
3787
3788void lockdep_sys_exit(void)
3789{
3790    struct task_struct *curr = current;
3791
3792    if (unlikely(curr->lockdep_depth)) {
3793        if (!debug_locks_off())
3794            return;
3795        printk("\n================================================\n");
3796        printk( "[ BUG: lock held when returning to user space! ]\n");
3797        printk( "------------------------------------------------\n");
3798        printk("%s/%d is leaving the kernel with locks still held!\n",
3799                curr->comm, curr->pid);
3800        lockdep_print_held_locks(curr);
3801    }
3802}
3803
