Root/kernel/lockdep.c

1/*
2 * kernel/lockdep.c
3 *
4 * Runtime locking correctness validator
5 *
6 * Started by Ingo Molnar:
7 *
8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
10 *
11 * this code maps all the lock dependencies as they occur in a live kernel
12 * and will warn about the following classes of locking bugs:
13 *
14 * - lock inversion scenarios
15 * - circular lock dependencies
16 * - hardirq/softirq safe/unsafe locking bugs
17 *
18 * Bugs are reported even if the current locking scenario does not cause
19 * any deadlock at this point.
20 *
21 * I.e. if anytime in the past two locks were taken in a different order,
22 * even if it happened for another task, even if those were different
23 * locks (but of the same class as this lock), this code will detect it.
24 *
25 * Thanks to Arjan van de Ven for coming up with the initial idea of
26 * mapping lock dependencies runtime.
27 */
28#define DISABLE_BRANCH_PROFILING
29#include <linux/mutex.h>
30#include <linux/sched.h>
31#include <linux/delay.h>
32#include <linux/module.h>
33#include <linux/proc_fs.h>
34#include <linux/seq_file.h>
35#include <linux/spinlock.h>
36#include <linux/kallsyms.h>
37#include <linux/interrupt.h>
38#include <linux/stacktrace.h>
39#include <linux/debug_locks.h>
40#include <linux/irqflags.h>
41#include <linux/utsname.h>
42#include <linux/hash.h>
43#include <linux/ftrace.h>
44#include <linux/stringify.h>
45#include <linux/bitops.h>
46#include <linux/gfp.h>
47
48#include <asm/sections.h>
49
50#include "lockdep_internals.h"
51
52#define CREATE_TRACE_POINTS
53#include <trace/events/lock.h>
54
55#ifdef CONFIG_PROVE_LOCKING
56int prove_locking = 1;
57module_param(prove_locking, int, 0644);
58#else
59#define prove_locking 0
60#endif
61
62#ifdef CONFIG_LOCK_STAT
63int lock_stat = 1;
64module_param(lock_stat, int, 0644);
65#else
66#define lock_stat 0
67#endif
68
69/*
70 * lockdep_lock: protects the lockdep graph, the hashes and the
71 * class/list/hash allocators.
72 *
73 * This is one of the rare exceptions where it's justified
74 * to use a raw spinlock - we really dont want the spinlock
75 * code to recurse back into the lockdep code...
76 */
77static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
78
79static int graph_lock(void)
80{
81    arch_spin_lock(&lockdep_lock);
82    /*
83     * Make sure that if another CPU detected a bug while
84     * walking the graph we dont change it (while the other
85     * CPU is busy printing out stuff with the graph lock
86     * dropped already)
87     */
88    if (!debug_locks) {
89        arch_spin_unlock(&lockdep_lock);
90        return 0;
91    }
92    /* prevent any recursions within lockdep from causing deadlocks */
93    current->lockdep_recursion++;
94    return 1;
95}
96
97static inline int graph_unlock(void)
98{
99    if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
100        return DEBUG_LOCKS_WARN_ON(1);
101
102    current->lockdep_recursion--;
103    arch_spin_unlock(&lockdep_lock);
104    return 0;
105}
106
107/*
108 * Turn lock debugging off and return with 0 if it was off already,
109 * and also release the graph lock:
110 */
111static inline int debug_locks_off_graph_unlock(void)
112{
113    int ret = debug_locks_off();
114
115    arch_spin_unlock(&lockdep_lock);
116
117    return ret;
118}
119
120static int lockdep_initialized;
121
122unsigned long nr_list_entries;
123static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
124
125/*
126 * All data structures here are protected by the global debug_lock.
127 *
128 * Mutex key structs only get allocated, once during bootup, and never
129 * get freed - this significantly simplifies the debugging code.
130 */
131unsigned long nr_lock_classes;
132static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
133
134static inline struct lock_class *hlock_class(struct held_lock *hlock)
135{
136    if (!hlock->class_idx) {
137        DEBUG_LOCKS_WARN_ON(1);
138        return NULL;
139    }
140    return lock_classes + hlock->class_idx - 1;
141}
142
143#ifdef CONFIG_LOCK_STAT
144static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
145              cpu_lock_stats);
146
147static inline u64 lockstat_clock(void)
148{
149    return local_clock();
150}
151
152static int lock_point(unsigned long points[], unsigned long ip)
153{
154    int i;
155
156    for (i = 0; i < LOCKSTAT_POINTS; i++) {
157        if (points[i] == 0) {
158            points[i] = ip;
159            break;
160        }
161        if (points[i] == ip)
162            break;
163    }
164
165    return i;
166}
167
168static void lock_time_inc(struct lock_time *lt, u64 time)
169{
170    if (time > lt->max)
171        lt->max = time;
172
173    if (time < lt->min || !lt->nr)
174        lt->min = time;
175
176    lt->total += time;
177    lt->nr++;
178}
179
180static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
181{
182    if (!src->nr)
183        return;
184
185    if (src->max > dst->max)
186        dst->max = src->max;
187
188    if (src->min < dst->min || !dst->nr)
189        dst->min = src->min;
190
191    dst->total += src->total;
192    dst->nr += src->nr;
193}
194
195struct lock_class_stats lock_stats(struct lock_class *class)
196{
197    struct lock_class_stats stats;
198    int cpu, i;
199
200    memset(&stats, 0, sizeof(struct lock_class_stats));
201    for_each_possible_cpu(cpu) {
202        struct lock_class_stats *pcs =
203            &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
204
205        for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
206            stats.contention_point[i] += pcs->contention_point[i];
207
208        for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
209            stats.contending_point[i] += pcs->contending_point[i];
210
211        lock_time_add(&pcs->read_waittime, &stats.read_waittime);
212        lock_time_add(&pcs->write_waittime, &stats.write_waittime);
213
214        lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
215        lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
216
217        for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
218            stats.bounces[i] += pcs->bounces[i];
219    }
220
221    return stats;
222}
223
224void clear_lock_stats(struct lock_class *class)
225{
226    int cpu;
227
228    for_each_possible_cpu(cpu) {
229        struct lock_class_stats *cpu_stats =
230            &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
231
232        memset(cpu_stats, 0, sizeof(struct lock_class_stats));
233    }
234    memset(class->contention_point, 0, sizeof(class->contention_point));
235    memset(class->contending_point, 0, sizeof(class->contending_point));
236}
237
238static struct lock_class_stats *get_lock_stats(struct lock_class *class)
239{
240    return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
241}
242
243static void put_lock_stats(struct lock_class_stats *stats)
244{
245    put_cpu_var(cpu_lock_stats);
246}
247
248static void lock_release_holdtime(struct held_lock *hlock)
249{
250    struct lock_class_stats *stats;
251    u64 holdtime;
252
253    if (!lock_stat)
254        return;
255
256    holdtime = lockstat_clock() - hlock->holdtime_stamp;
257
258    stats = get_lock_stats(hlock_class(hlock));
259    if (hlock->read)
260        lock_time_inc(&stats->read_holdtime, holdtime);
261    else
262        lock_time_inc(&stats->write_holdtime, holdtime);
263    put_lock_stats(stats);
264}
265#else
266static inline void lock_release_holdtime(struct held_lock *hlock)
267{
268}
269#endif
270
271/*
272 * We keep a global list of all lock classes. The list only grows,
273 * never shrinks. The list is only accessed with the lockdep
274 * spinlock lock held.
275 */
276LIST_HEAD(all_lock_classes);
277
278/*
279 * The lockdep classes are in a hash-table as well, for fast lookup:
280 */
281#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
282#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
283#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
284#define classhashentry(key) (classhash_table + __classhashfn((key)))
285
286static struct list_head classhash_table[CLASSHASH_SIZE];
287
288/*
289 * We put the lock dependency chains into a hash-table as well, to cache
290 * their existence:
291 */
292#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
293#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
294#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
295#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
296
297static struct list_head chainhash_table[CHAINHASH_SIZE];
298
299/*
300 * The hash key of the lock dependency chains is a hash itself too:
301 * it's a hash of all locks taken up to that lock, including that lock.
302 * It's a 64-bit hash, because it's important for the keys to be
303 * unique.
304 */
305#define iterate_chain_key(key1, key2) \
306    (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
307    ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
308    (key2))
309
310void lockdep_off(void)
311{
312    current->lockdep_recursion++;
313}
314EXPORT_SYMBOL(lockdep_off);
315
316void lockdep_on(void)
317{
318    current->lockdep_recursion--;
319}
320EXPORT_SYMBOL(lockdep_on);
321
322/*
323 * Debugging switches:
324 */
325
326#define VERBOSE 0
327#define VERY_VERBOSE 0
328
329#if VERBOSE
330# define HARDIRQ_VERBOSE 1
331# define SOFTIRQ_VERBOSE 1
332# define RECLAIM_VERBOSE 1
333#else
334# define HARDIRQ_VERBOSE 0
335# define SOFTIRQ_VERBOSE 0
336# define RECLAIM_VERBOSE 0
337#endif
338
339#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
340/*
341 * Quick filtering for interesting events:
342 */
343static int class_filter(struct lock_class *class)
344{
345#if 0
346    /* Example */
347    if (class->name_version == 1 &&
348            !strcmp(class->name, "lockname"))
349        return 1;
350    if (class->name_version == 1 &&
351            !strcmp(class->name, "&struct->lockfield"))
352        return 1;
353#endif
354    /* Filter everything else. 1 would be to allow everything else */
355    return 0;
356}
357#endif
358
359static int verbose(struct lock_class *class)
360{
361#if VERBOSE
362    return class_filter(class);
363#endif
364    return 0;
365}
366
367/*
368 * Stack-trace: tightly packed array of stack backtrace
369 * addresses. Protected by the graph_lock.
370 */
371unsigned long nr_stack_trace_entries;
372static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
373
374static int save_trace(struct stack_trace *trace)
375{
376    trace->nr_entries = 0;
377    trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
378    trace->entries = stack_trace + nr_stack_trace_entries;
379
380    trace->skip = 3;
381
382    save_stack_trace(trace);
383
384    /*
385     * Some daft arches put -1 at the end to indicate its a full trace.
386     *
387     * <rant> this is buggy anyway, since it takes a whole extra entry so a
388     * complete trace that maxes out the entries provided will be reported
389     * as incomplete, friggin useless </rant>
390     */
391    if (trace->nr_entries != 0 &&
392        trace->entries[trace->nr_entries-1] == ULONG_MAX)
393        trace->nr_entries--;
394
395    trace->max_entries = trace->nr_entries;
396
397    nr_stack_trace_entries += trace->nr_entries;
398
399    if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
400        if (!debug_locks_off_graph_unlock())
401            return 0;
402
403        printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
404        printk("turning off the locking correctness validator.\n");
405        dump_stack();
406
407        return 0;
408    }
409
410    return 1;
411}
412
413unsigned int nr_hardirq_chains;
414unsigned int nr_softirq_chains;
415unsigned int nr_process_chains;
416unsigned int max_lockdep_depth;
417
418#ifdef CONFIG_DEBUG_LOCKDEP
419/*
420 * We cannot printk in early bootup code. Not even early_printk()
421 * might work. So we mark any initialization errors and printk
422 * about it later on, in lockdep_info().
423 */
424static int lockdep_init_error;
425static unsigned long lockdep_init_trace_data[20];
426static struct stack_trace lockdep_init_trace = {
427    .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
428    .entries = lockdep_init_trace_data,
429};
430
431/*
432 * Various lockdep statistics:
433 */
434DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
435#endif
436
437/*
438 * Locking printouts:
439 */
440
441#define __USAGE(__STATE) \
442    [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
443    [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
444    [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
445    [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
446
447static const char *usage_str[] =
448{
449#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
450#include "lockdep_states.h"
451#undef LOCKDEP_STATE
452    [LOCK_USED] = "INITIAL USE",
453};
454
455const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
456{
457    return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
458}
459
460static inline unsigned long lock_flag(enum lock_usage_bit bit)
461{
462    return 1UL << bit;
463}
464
465static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
466{
467    char c = '.';
468
469    if (class->usage_mask & lock_flag(bit + 2))
470        c = '+';
471    if (class->usage_mask & lock_flag(bit)) {
472        c = '-';
473        if (class->usage_mask & lock_flag(bit + 2))
474            c = '?';
475    }
476
477    return c;
478}
479
480void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
481{
482    int i = 0;
483
484#define LOCKDEP_STATE(__STATE) \
485    usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
486    usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
487#include "lockdep_states.h"
488#undef LOCKDEP_STATE
489
490    usage[i] = '\0';
491}
492
493static void print_lock_name(struct lock_class *class)
494{
495    char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
496    const char *name;
497
498    get_usage_chars(class, usage);
499
500    name = class->name;
501    if (!name) {
502        name = __get_key_name(class->key, str);
503        printk(" (%s", name);
504    } else {
505        printk(" (%s", name);
506        if (class->name_version > 1)
507            printk("#%d", class->name_version);
508        if (class->subclass)
509            printk("/%d", class->subclass);
510    }
511    printk("){%s}", usage);
512}
513
514static void print_lockdep_cache(struct lockdep_map *lock)
515{
516    const char *name;
517    char str[KSYM_NAME_LEN];
518
519    name = lock->name;
520    if (!name)
521        name = __get_key_name(lock->key->subkeys, str);
522
523    printk("%s", name);
524}
525
526static void print_lock(struct held_lock *hlock)
527{
528    print_lock_name(hlock_class(hlock));
529    printk(", at: ");
530    print_ip_sym(hlock->acquire_ip);
531}
532
533static void lockdep_print_held_locks(struct task_struct *curr)
534{
535    int i, depth = curr->lockdep_depth;
536
537    if (!depth) {
538        printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
539        return;
540    }
541    printk("%d lock%s held by %s/%d:\n",
542        depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
543
544    for (i = 0; i < depth; i++) {
545        printk(" #%d: ", i);
546        print_lock(curr->held_locks + i);
547    }
548}
549
550static void print_kernel_version(void)
551{
552    printk("%s %.*s\n", init_utsname()->release,
553        (int)strcspn(init_utsname()->version, " "),
554        init_utsname()->version);
555}
556
557static int very_verbose(struct lock_class *class)
558{
559#if VERY_VERBOSE
560    return class_filter(class);
561#endif
562    return 0;
563}
564
565/*
566 * Is this the address of a static object:
567 */
568static int static_obj(void *obj)
569{
570    unsigned long start = (unsigned long) &_stext,
571              end = (unsigned long) &_end,
572              addr = (unsigned long) obj;
573
574    /*
575     * static variable?
576     */
577    if ((addr >= start) && (addr < end))
578        return 1;
579
580    if (arch_is_kernel_data(addr))
581        return 1;
582
583    /*
584     * in-kernel percpu var?
585     */
586    if (is_kernel_percpu_address(addr))
587        return 1;
588
589    /*
590     * module static or percpu var?
591     */
592    return is_module_address(addr) || is_module_percpu_address(addr);
593}
594
595/*
596 * To make lock name printouts unique, we calculate a unique
597 * class->name_version generation counter:
598 */
599static int count_matching_names(struct lock_class *new_class)
600{
601    struct lock_class *class;
602    int count = 0;
603
604    if (!new_class->name)
605        return 0;
606
607    list_for_each_entry(class, &all_lock_classes, lock_entry) {
608        if (new_class->key - new_class->subclass == class->key)
609            return class->name_version;
610        if (class->name && !strcmp(class->name, new_class->name))
611            count = max(count, class->name_version);
612    }
613
614    return count + 1;
615}
616
617/*
618 * Register a lock's class in the hash-table, if the class is not present
619 * yet. Otherwise we look it up. We cache the result in the lock object
620 * itself, so actual lookup of the hash should be once per lock object.
621 */
622static inline struct lock_class *
623look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
624{
625    struct lockdep_subclass_key *key;
626    struct list_head *hash_head;
627    struct lock_class *class;
628
629#ifdef CONFIG_DEBUG_LOCKDEP
630    /*
631     * If the architecture calls into lockdep before initializing
632     * the hashes then we'll warn about it later. (we cannot printk
633     * right now)
634     */
635    if (unlikely(!lockdep_initialized)) {
636        lockdep_init();
637        lockdep_init_error = 1;
638        save_stack_trace(&lockdep_init_trace);
639    }
640#endif
641
642    /*
643     * Static locks do not have their class-keys yet - for them the key
644     * is the lock object itself:
645     */
646    if (unlikely(!lock->key))
647        lock->key = (void *)lock;
648
649    /*
650     * NOTE: the class-key must be unique. For dynamic locks, a static
651     * lock_class_key variable is passed in through the mutex_init()
652     * (or spin_lock_init()) call - which acts as the key. For static
653     * locks we use the lock object itself as the key.
654     */
655    BUILD_BUG_ON(sizeof(struct lock_class_key) >
656            sizeof(struct lockdep_map));
657
658    key = lock->key->subkeys + subclass;
659
660    hash_head = classhashentry(key);
661
662    /*
663     * We can walk the hash lockfree, because the hash only
664     * grows, and we are careful when adding entries to the end:
665     */
666    list_for_each_entry(class, hash_head, hash_entry) {
667        if (class->key == key) {
668            WARN_ON_ONCE(class->name != lock->name);
669            return class;
670        }
671    }
672
673    return NULL;
674}
675
676/*
677 * Register a lock's class in the hash-table, if the class is not present
678 * yet. Otherwise we look it up. We cache the result in the lock object
679 * itself, so actual lookup of the hash should be once per lock object.
680 */
681static inline struct lock_class *
682register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
683{
684    struct lockdep_subclass_key *key;
685    struct list_head *hash_head;
686    struct lock_class *class;
687    unsigned long flags;
688
689    class = look_up_lock_class(lock, subclass);
690    if (likely(class))
691        return class;
692
693    /*
694     * Debug-check: all keys must be persistent!
695      */
696    if (!static_obj(lock->key)) {
697        debug_locks_off();
698        printk("INFO: trying to register non-static key.\n");
699        printk("the code is fine but needs lockdep annotation.\n");
700        printk("turning off the locking correctness validator.\n");
701        dump_stack();
702
703        return NULL;
704    }
705
706    key = lock->key->subkeys + subclass;
707    hash_head = classhashentry(key);
708
709    raw_local_irq_save(flags);
710    if (!graph_lock()) {
711        raw_local_irq_restore(flags);
712        return NULL;
713    }
714    /*
715     * We have to do the hash-walk again, to avoid races
716     * with another CPU:
717     */
718    list_for_each_entry(class, hash_head, hash_entry)
719        if (class->key == key)
720            goto out_unlock_set;
721    /*
722     * Allocate a new key from the static array, and add it to
723     * the hash:
724     */
725    if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
726        if (!debug_locks_off_graph_unlock()) {
727            raw_local_irq_restore(flags);
728            return NULL;
729        }
730        raw_local_irq_restore(flags);
731
732        printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
733        printk("turning off the locking correctness validator.\n");
734        dump_stack();
735        return NULL;
736    }
737    class = lock_classes + nr_lock_classes++;
738    debug_atomic_inc(nr_unused_locks);
739    class->key = key;
740    class->name = lock->name;
741    class->subclass = subclass;
742    INIT_LIST_HEAD(&class->lock_entry);
743    INIT_LIST_HEAD(&class->locks_before);
744    INIT_LIST_HEAD(&class->locks_after);
745    class->name_version = count_matching_names(class);
746    /*
747     * We use RCU's safe list-add method to make
748     * parallel walking of the hash-list safe:
749     */
750    list_add_tail_rcu(&class->hash_entry, hash_head);
751    /*
752     * Add it to the global list of classes:
753     */
754    list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
755
756    if (verbose(class)) {
757        graph_unlock();
758        raw_local_irq_restore(flags);
759
760        printk("\nnew class %p: %s", class->key, class->name);
761        if (class->name_version > 1)
762            printk("#%d", class->name_version);
763        printk("\n");
764        dump_stack();
765
766        raw_local_irq_save(flags);
767        if (!graph_lock()) {
768            raw_local_irq_restore(flags);
769            return NULL;
770        }
771    }
772out_unlock_set:
773    graph_unlock();
774    raw_local_irq_restore(flags);
775
776    if (!subclass || force)
777        lock->class_cache = class;
778
779    if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
780        return NULL;
781
782    return class;
783}
784
785#ifdef CONFIG_PROVE_LOCKING
786/*
787 * Allocate a lockdep entry. (assumes the graph_lock held, returns
788 * with NULL on failure)
789 */
790static struct lock_list *alloc_list_entry(void)
791{
792    if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
793        if (!debug_locks_off_graph_unlock())
794            return NULL;
795
796        printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
797        printk("turning off the locking correctness validator.\n");
798        dump_stack();
799        return NULL;
800    }
801    return list_entries + nr_list_entries++;
802}
803
804/*
805 * Add a new dependency to the head of the list:
806 */
807static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
808                struct list_head *head, unsigned long ip,
809                int distance, struct stack_trace *trace)
810{
811    struct lock_list *entry;
812    /*
813     * Lock not present yet - get a new dependency struct and
814     * add it to the list:
815     */
816    entry = alloc_list_entry();
817    if (!entry)
818        return 0;
819
820    entry->class = this;
821    entry->distance = distance;
822    entry->trace = *trace;
823    /*
824     * Since we never remove from the dependency list, the list can
825     * be walked lockless by other CPUs, it's only allocation
826     * that must be protected by the spinlock. But this also means
827     * we must make new entries visible only once writes to the
828     * entry become visible - hence the RCU op:
829     */
830    list_add_tail_rcu(&entry->entry, head);
831
832    return 1;
833}
834
835/*
836 * For good efficiency of modular, we use power of 2
837 */
838#define MAX_CIRCULAR_QUEUE_SIZE 4096UL
839#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
840
841/*
842 * The circular_queue and helpers is used to implement the
843 * breadth-first search(BFS)algorithem, by which we can build
844 * the shortest path from the next lock to be acquired to the
845 * previous held lock if there is a circular between them.
846 */
847struct circular_queue {
848    unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
849    unsigned int front, rear;
850};
851
852static struct circular_queue lock_cq;
853
854unsigned int max_bfs_queue_depth;
855
856static unsigned int lockdep_dependency_gen_id;
857
858static inline void __cq_init(struct circular_queue *cq)
859{
860    cq->front = cq->rear = 0;
861    lockdep_dependency_gen_id++;
862}
863
864static inline int __cq_empty(struct circular_queue *cq)
865{
866    return (cq->front == cq->rear);
867}
868
869static inline int __cq_full(struct circular_queue *cq)
870{
871    return ((cq->rear + 1) & CQ_MASK) == cq->front;
872}
873
874static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
875{
876    if (__cq_full(cq))
877        return -1;
878
879    cq->element[cq->rear] = elem;
880    cq->rear = (cq->rear + 1) & CQ_MASK;
881    return 0;
882}
883
884static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
885{
886    if (__cq_empty(cq))
887        return -1;
888
889    *elem = cq->element[cq->front];
890    cq->front = (cq->front + 1) & CQ_MASK;
891    return 0;
892}
893
894static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
895{
896    return (cq->rear - cq->front) & CQ_MASK;
897}
898
899static inline void mark_lock_accessed(struct lock_list *lock,
900                    struct lock_list *parent)
901{
902    unsigned long nr;
903
904    nr = lock - list_entries;
905    WARN_ON(nr >= nr_list_entries);
906    lock->parent = parent;
907    lock->class->dep_gen_id = lockdep_dependency_gen_id;
908}
909
910static inline unsigned long lock_accessed(struct lock_list *lock)
911{
912    unsigned long nr;
913
914    nr = lock - list_entries;
915    WARN_ON(nr >= nr_list_entries);
916    return lock->class->dep_gen_id == lockdep_dependency_gen_id;
917}
918
919static inline struct lock_list *get_lock_parent(struct lock_list *child)
920{
921    return child->parent;
922}
923
924static inline int get_lock_depth(struct lock_list *child)
925{
926    int depth = 0;
927    struct lock_list *parent;
928
929    while ((parent = get_lock_parent(child))) {
930        child = parent;
931        depth++;
932    }
933    return depth;
934}
935
936static int __bfs(struct lock_list *source_entry,
937         void *data,
938         int (*match)(struct lock_list *entry, void *data),
939         struct lock_list **target_entry,
940         int forward)
941{
942    struct lock_list *entry;
943    struct list_head *head;
944    struct circular_queue *cq = &lock_cq;
945    int ret = 1;
946
947    if (match(source_entry, data)) {
948        *target_entry = source_entry;
949        ret = 0;
950        goto exit;
951    }
952
953    if (forward)
954        head = &source_entry->class->locks_after;
955    else
956        head = &source_entry->class->locks_before;
957
958    if (list_empty(head))
959        goto exit;
960
961    __cq_init(cq);
962    __cq_enqueue(cq, (unsigned long)source_entry);
963
964    while (!__cq_empty(cq)) {
965        struct lock_list *lock;
966
967        __cq_dequeue(cq, (unsigned long *)&lock);
968
969        if (!lock->class) {
970            ret = -2;
971            goto exit;
972        }
973
974        if (forward)
975            head = &lock->class->locks_after;
976        else
977            head = &lock->class->locks_before;
978
979        list_for_each_entry(entry, head, entry) {
980            if (!lock_accessed(entry)) {
981                unsigned int cq_depth;
982                mark_lock_accessed(entry, lock);
983                if (match(entry, data)) {
984                    *target_entry = entry;
985                    ret = 0;
986                    goto exit;
987                }
988
989                if (__cq_enqueue(cq, (unsigned long)entry)) {
990                    ret = -1;
991                    goto exit;
992                }
993                cq_depth = __cq_get_elem_count(cq);
994                if (max_bfs_queue_depth < cq_depth)
995                    max_bfs_queue_depth = cq_depth;
996            }
997        }
998    }
999exit:
1000    return ret;
1001}
1002
1003static inline int __bfs_forwards(struct lock_list *src_entry,
1004            void *data,
1005            int (*match)(struct lock_list *entry, void *data),
1006            struct lock_list **target_entry)
1007{
1008    return __bfs(src_entry, data, match, target_entry, 1);
1009
1010}
1011
1012static inline int __bfs_backwards(struct lock_list *src_entry,
1013            void *data,
1014            int (*match)(struct lock_list *entry, void *data),
1015            struct lock_list **target_entry)
1016{
1017    return __bfs(src_entry, data, match, target_entry, 0);
1018
1019}
1020
1021/*
1022 * Recursive, forwards-direction lock-dependency checking, used for
1023 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
1024 * checking.
1025 */
1026
1027/*
1028 * Print a dependency chain entry (this is only done when a deadlock
1029 * has been detected):
1030 */
1031static noinline int
1032print_circular_bug_entry(struct lock_list *target, int depth)
1033{
1034    if (debug_locks_silent)
1035        return 0;
1036    printk("\n-> #%u", depth);
1037    print_lock_name(target->class);
1038    printk(":\n");
1039    print_stack_trace(&target->trace, 6);
1040
1041    return 0;
1042}
1043
1044/*
1045 * When a circular dependency is detected, print the
1046 * header first:
1047 */
1048static noinline int
1049print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1050            struct held_lock *check_src,
1051            struct held_lock *check_tgt)
1052{
1053    struct task_struct *curr = current;
1054
1055    if (debug_locks_silent)
1056        return 0;
1057
1058    printk("\n=======================================================\n");
1059    printk( "[ INFO: possible circular locking dependency detected ]\n");
1060    print_kernel_version();
1061    printk( "-------------------------------------------------------\n");
1062    printk("%s/%d is trying to acquire lock:\n",
1063        curr->comm, task_pid_nr(curr));
1064    print_lock(check_src);
1065    printk("\nbut task is already holding lock:\n");
1066    print_lock(check_tgt);
1067    printk("\nwhich lock already depends on the new lock.\n\n");
1068    printk("\nthe existing dependency chain (in reverse order) is:\n");
1069
1070    print_circular_bug_entry(entry, depth);
1071
1072    return 0;
1073}
1074
1075static inline int class_equal(struct lock_list *entry, void *data)
1076{
1077    return entry->class == data;
1078}
1079
1080static noinline int print_circular_bug(struct lock_list *this,
1081                struct lock_list *target,
1082                struct held_lock *check_src,
1083                struct held_lock *check_tgt)
1084{
1085    struct task_struct *curr = current;
1086    struct lock_list *parent;
1087    int depth;
1088
1089    if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1090        return 0;
1091
1092    if (!save_trace(&this->trace))
1093        return 0;
1094
1095    depth = get_lock_depth(target);
1096
1097    print_circular_bug_header(target, depth, check_src, check_tgt);
1098
1099    parent = get_lock_parent(target);
1100
1101    while (parent) {
1102        print_circular_bug_entry(parent, --depth);
1103        parent = get_lock_parent(parent);
1104    }
1105
1106    printk("\nother info that might help us debug this:\n\n");
1107    lockdep_print_held_locks(curr);
1108
1109    printk("\nstack backtrace:\n");
1110    dump_stack();
1111
1112    return 0;
1113}
1114
1115static noinline int print_bfs_bug(int ret)
1116{
1117    if (!debug_locks_off_graph_unlock())
1118        return 0;
1119
1120    WARN(1, "lockdep bfs error:%d\n", ret);
1121
1122    return 0;
1123}
1124
1125static int noop_count(struct lock_list *entry, void *data)
1126{
1127    (*(unsigned long *)data)++;
1128    return 0;
1129}
1130
1131unsigned long __lockdep_count_forward_deps(struct lock_list *this)
1132{
1133    unsigned long count = 0;
1134    struct lock_list *uninitialized_var(target_entry);
1135
1136    __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1137
1138    return count;
1139}
1140unsigned long lockdep_count_forward_deps(struct lock_class *class)
1141{
1142    unsigned long ret, flags;
1143    struct lock_list this;
1144
1145    this.parent = NULL;
1146    this.class = class;
1147
1148    local_irq_save(flags);
1149    arch_spin_lock(&lockdep_lock);
1150    ret = __lockdep_count_forward_deps(&this);
1151    arch_spin_unlock(&lockdep_lock);
1152    local_irq_restore(flags);
1153
1154    return ret;
1155}
1156
1157unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1158{
1159    unsigned long count = 0;
1160    struct lock_list *uninitialized_var(target_entry);
1161
1162    __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1163
1164    return count;
1165}
1166
1167unsigned long lockdep_count_backward_deps(struct lock_class *class)
1168{
1169    unsigned long ret, flags;
1170    struct lock_list this;
1171
1172    this.parent = NULL;
1173    this.class = class;
1174
1175    local_irq_save(flags);
1176    arch_spin_lock(&lockdep_lock);
1177    ret = __lockdep_count_backward_deps(&this);
1178    arch_spin_unlock(&lockdep_lock);
1179    local_irq_restore(flags);
1180
1181    return ret;
1182}
1183
1184/*
1185 * Prove that the dependency graph starting at <entry> can not
1186 * lead to <target>. Print an error and return 0 if it does.
1187 */
1188static noinline int
1189check_noncircular(struct lock_list *root, struct lock_class *target,
1190        struct lock_list **target_entry)
1191{
1192    int result;
1193
1194    debug_atomic_inc(nr_cyclic_checks);
1195
1196    result = __bfs_forwards(root, target, class_equal, target_entry);
1197
1198    return result;
1199}
1200
1201#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1202/*
1203 * Forwards and backwards subgraph searching, for the purposes of
1204 * proving that two subgraphs can be connected by a new dependency
1205 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1206 */
1207
1208static inline int usage_match(struct lock_list *entry, void *bit)
1209{
1210    return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1211}
1212
1213
1214
1215/*
1216 * Find a node in the forwards-direction dependency sub-graph starting
1217 * at @root->class that matches @bit.
1218 *
1219 * Return 0 if such a node exists in the subgraph, and put that node
1220 * into *@target_entry.
1221 *
1222 * Return 1 otherwise and keep *@target_entry unchanged.
1223 * Return <0 on error.
1224 */
1225static int
1226find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1227            struct lock_list **target_entry)
1228{
1229    int result;
1230
1231    debug_atomic_inc(nr_find_usage_forwards_checks);
1232
1233    result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1234
1235    return result;
1236}
1237
1238/*
1239 * Find a node in the backwards-direction dependency sub-graph starting
1240 * at @root->class that matches @bit.
1241 *
1242 * Return 0 if such a node exists in the subgraph, and put that node
1243 * into *@target_entry.
1244 *
1245 * Return 1 otherwise and keep *@target_entry unchanged.
1246 * Return <0 on error.
1247 */
1248static int
1249find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1250            struct lock_list **target_entry)
1251{
1252    int result;
1253
1254    debug_atomic_inc(nr_find_usage_backwards_checks);
1255
1256    result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
1257
1258    return result;
1259}
1260
1261static void print_lock_class_header(struct lock_class *class, int depth)
1262{
1263    int bit;
1264
1265    printk("%*s->", depth, "");
1266    print_lock_name(class);
1267    printk(" ops: %lu", class->ops);
1268    printk(" {\n");
1269
1270    for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1271        if (class->usage_mask & (1 << bit)) {
1272            int len = depth;
1273
1274            len += printk("%*s %s", depth, "", usage_str[bit]);
1275            len += printk(" at:\n");
1276            print_stack_trace(class->usage_traces + bit, len);
1277        }
1278    }
1279    printk("%*s }\n", depth, "");
1280
1281    printk("%*s ... key at: ",depth,"");
1282    print_ip_sym((unsigned long)class->key);
1283}
1284
1285/*
1286 * printk the shortest lock dependencies from @start to @end in reverse order:
1287 */
1288static void __used
1289print_shortest_lock_dependencies(struct lock_list *leaf,
1290                struct lock_list *root)
1291{
1292    struct lock_list *entry = leaf;
1293    int depth;
1294
1295    /*compute depth from generated tree by BFS*/
1296    depth = get_lock_depth(leaf);
1297
1298    do {
1299        print_lock_class_header(entry->class, depth);
1300        printk("%*s ... acquired at:\n", depth, "");
1301        print_stack_trace(&entry->trace, 2);
1302        printk("\n");
1303
1304        if (depth == 0 && (entry != root)) {
1305            printk("lockdep:%s bad BFS generated tree\n", __func__);
1306            break;
1307        }
1308
1309        entry = get_lock_parent(entry);
1310        depth--;
1311    } while (entry && (depth >= 0));
1312
1313    return;
1314}
1315
1316static int
1317print_bad_irq_dependency(struct task_struct *curr,
1318             struct lock_list *prev_root,
1319             struct lock_list *next_root,
1320             struct lock_list *backwards_entry,
1321             struct lock_list *forwards_entry,
1322             struct held_lock *prev,
1323             struct held_lock *next,
1324             enum lock_usage_bit bit1,
1325             enum lock_usage_bit bit2,
1326             const char *irqclass)
1327{
1328    if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1329        return 0;
1330
1331    printk("\n======================================================\n");
1332    printk( "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
1333        irqclass, irqclass);
1334    print_kernel_version();
1335    printk( "------------------------------------------------------\n");
1336    printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1337        curr->comm, task_pid_nr(curr),
1338        curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1339        curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1340        curr->hardirqs_enabled,
1341        curr->softirqs_enabled);
1342    print_lock(next);
1343
1344    printk("\nand this task is already holding:\n");
1345    print_lock(prev);
1346    printk("which would create a new lock dependency:\n");
1347    print_lock_name(hlock_class(prev));
1348    printk(" ->");
1349    print_lock_name(hlock_class(next));
1350    printk("\n");
1351
1352    printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1353        irqclass);
1354    print_lock_name(backwards_entry->class);
1355    printk("\n... which became %s-irq-safe at:\n", irqclass);
1356
1357    print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
1358
1359    printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1360    print_lock_name(forwards_entry->class);
1361    printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1362    printk("...");
1363
1364    print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
1365
1366    printk("\nother info that might help us debug this:\n\n");
1367    lockdep_print_held_locks(curr);
1368
1369    printk("\nthe dependencies between %s-irq-safe lock", irqclass);
1370    printk(" and the holding lock:\n");
1371    if (!save_trace(&prev_root->trace))
1372        return 0;
1373    print_shortest_lock_dependencies(backwards_entry, prev_root);
1374
1375    printk("\nthe dependencies between the lock to be acquired");
1376    printk(" and %s-irq-unsafe lock:\n", irqclass);
1377    if (!save_trace(&next_root->trace))
1378        return 0;
1379    print_shortest_lock_dependencies(forwards_entry, next_root);
1380
1381    printk("\nstack backtrace:\n");
1382    dump_stack();
1383
1384    return 0;
1385}
1386
1387static int
1388check_usage(struct task_struct *curr, struct held_lock *prev,
1389        struct held_lock *next, enum lock_usage_bit bit_backwards,
1390        enum lock_usage_bit bit_forwards, const char *irqclass)
1391{
1392    int ret;
1393    struct lock_list this, that;
1394    struct lock_list *uninitialized_var(target_entry);
1395    struct lock_list *uninitialized_var(target_entry1);
1396
1397    this.parent = NULL;
1398
1399    this.class = hlock_class(prev);
1400    ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1401    if (ret < 0)
1402        return print_bfs_bug(ret);
1403    if (ret == 1)
1404        return ret;
1405
1406    that.parent = NULL;
1407    that.class = hlock_class(next);
1408    ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1409    if (ret < 0)
1410        return print_bfs_bug(ret);
1411    if (ret == 1)
1412        return ret;
1413
1414    return print_bad_irq_dependency(curr, &this, &that,
1415            target_entry, target_entry1,
1416            prev, next,
1417            bit_backwards, bit_forwards, irqclass);
1418}
1419
1420static const char *state_names[] = {
1421#define LOCKDEP_STATE(__STATE) \
1422    __stringify(__STATE),
1423#include "lockdep_states.h"
1424#undef LOCKDEP_STATE
1425};
1426
1427static const char *state_rnames[] = {
1428#define LOCKDEP_STATE(__STATE) \
1429    __stringify(__STATE)"-READ",
1430#include "lockdep_states.h"
1431#undef LOCKDEP_STATE
1432};
1433
1434static inline const char *state_name(enum lock_usage_bit bit)
1435{
1436    return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1437}
1438
1439static int exclusive_bit(int new_bit)
1440{
1441    /*
1442     * USED_IN
1443     * USED_IN_READ
1444     * ENABLED
1445     * ENABLED_READ
1446     *
1447     * bit 0 - write/read
1448     * bit 1 - used_in/enabled
1449     * bit 2+ state
1450     */
1451
1452    int state = new_bit & ~3;
1453    int dir = new_bit & 2;
1454
1455    /*
1456     * keep state, bit flip the direction and strip read.
1457     */
1458    return state | (dir ^ 2);
1459}
1460
1461static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1462               struct held_lock *next, enum lock_usage_bit bit)
1463{
1464    /*
1465     * Prove that the new dependency does not connect a hardirq-safe
1466     * lock with a hardirq-unsafe lock - to achieve this we search
1467     * the backwards-subgraph starting at <prev>, and the
1468     * forwards-subgraph starting at <next>:
1469     */
1470    if (!check_usage(curr, prev, next, bit,
1471               exclusive_bit(bit), state_name(bit)))
1472        return 0;
1473
1474    bit++; /* _READ */
1475
1476    /*
1477     * Prove that the new dependency does not connect a hardirq-safe-read
1478     * lock with a hardirq-unsafe lock - to achieve this we search
1479     * the backwards-subgraph starting at <prev>, and the
1480     * forwards-subgraph starting at <next>:
1481     */
1482    if (!check_usage(curr, prev, next, bit,
1483               exclusive_bit(bit), state_name(bit)))
1484        return 0;
1485
1486    return 1;
1487}
1488
1489static int
1490check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1491        struct held_lock *next)
1492{
1493#define LOCKDEP_STATE(__STATE) \
1494    if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1495        return 0;
1496#include "lockdep_states.h"
1497#undef LOCKDEP_STATE
1498
1499    return 1;
1500}
1501
1502static void inc_chains(void)
1503{
1504    if (current->hardirq_context)
1505        nr_hardirq_chains++;
1506    else {
1507        if (current->softirq_context)
1508            nr_softirq_chains++;
1509        else
1510            nr_process_chains++;
1511    }
1512}
1513
1514#else
1515
1516static inline int
1517check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1518        struct held_lock *next)
1519{
1520    return 1;
1521}
1522
1523static inline void inc_chains(void)
1524{
1525    nr_process_chains++;
1526}
1527
1528#endif
1529
1530static int
1531print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1532           struct held_lock *next)
1533{
1534    if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1535        return 0;
1536
1537    printk("\n=============================================\n");
1538    printk( "[ INFO: possible recursive locking detected ]\n");
1539    print_kernel_version();
1540    printk( "---------------------------------------------\n");
1541    printk("%s/%d is trying to acquire lock:\n",
1542        curr->comm, task_pid_nr(curr));
1543    print_lock(next);
1544    printk("\nbut task is already holding lock:\n");
1545    print_lock(prev);
1546
1547    printk("\nother info that might help us debug this:\n");
1548    lockdep_print_held_locks(curr);
1549
1550    printk("\nstack backtrace:\n");
1551    dump_stack();
1552
1553    return 0;
1554}
1555
1556/*
1557 * Check whether we are holding such a class already.
1558 *
1559 * (Note that this has to be done separately, because the graph cannot
1560 * detect such classes of deadlocks.)
1561 *
1562 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1563 */
1564static int
1565check_deadlock(struct task_struct *curr, struct held_lock *next,
1566           struct lockdep_map *next_instance, int read)
1567{
1568    struct held_lock *prev;
1569    struct held_lock *nest = NULL;
1570    int i;
1571
1572    for (i = 0; i < curr->lockdep_depth; i++) {
1573        prev = curr->held_locks + i;
1574
1575        if (prev->instance == next->nest_lock)
1576            nest = prev;
1577
1578        if (hlock_class(prev) != hlock_class(next))
1579            continue;
1580
1581        /*
1582         * Allow read-after-read recursion of the same
1583         * lock class (i.e. read_lock(lock)+read_lock(lock)):
1584         */
1585        if ((read == 2) && prev->read)
1586            return 2;
1587
1588        /*
1589         * We're holding the nest_lock, which serializes this lock's
1590         * nesting behaviour.
1591         */
1592        if (nest)
1593            return 2;
1594
1595        return print_deadlock_bug(curr, prev, next);
1596    }
1597    return 1;
1598}
1599
1600/*
1601 * There was a chain-cache miss, and we are about to add a new dependency
1602 * to a previous lock. We recursively validate the following rules:
1603 *
1604 * - would the adding of the <prev> -> <next> dependency create a
1605 * circular dependency in the graph? [== circular deadlock]
1606 *
1607 * - does the new prev->next dependency connect any hardirq-safe lock
1608 * (in the full backwards-subgraph starting at <prev>) with any
1609 * hardirq-unsafe lock (in the full forwards-subgraph starting at
1610 * <next>)? [== illegal lock inversion with hardirq contexts]
1611 *
1612 * - does the new prev->next dependency connect any softirq-safe lock
1613 * (in the full backwards-subgraph starting at <prev>) with any
1614 * softirq-unsafe lock (in the full forwards-subgraph starting at
1615 * <next>)? [== illegal lock inversion with softirq contexts]
1616 *
1617 * any of these scenarios could lead to a deadlock.
1618 *
1619 * Then if all the validations pass, we add the forwards and backwards
1620 * dependency.
1621 */
1622static int
1623check_prev_add(struct task_struct *curr, struct held_lock *prev,
1624           struct held_lock *next, int distance, int trylock_loop)
1625{
1626    struct lock_list *entry;
1627    int ret;
1628    struct lock_list this;
1629    struct lock_list *uninitialized_var(target_entry);
1630    /*
1631     * Static variable, serialized by the graph_lock().
1632     *
1633     * We use this static variable to save the stack trace in case
1634     * we call into this function multiple times due to encountering
1635     * trylocks in the held lock stack.
1636     */
1637    static struct stack_trace trace;
1638
1639    /*
1640     * Prove that the new <prev> -> <next> dependency would not
1641     * create a circular dependency in the graph. (We do this by
1642     * forward-recursing into the graph starting at <next>, and
1643     * checking whether we can reach <prev>.)
1644     *
1645     * We are using global variables to control the recursion, to
1646     * keep the stackframe size of the recursive functions low:
1647     */
1648    this.class = hlock_class(next);
1649    this.parent = NULL;
1650    ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1651    if (unlikely(!ret))
1652        return print_circular_bug(&this, target_entry, next, prev);
1653    else if (unlikely(ret < 0))
1654        return print_bfs_bug(ret);
1655
1656    if (!check_prev_add_irq(curr, prev, next))
1657        return 0;
1658
1659    /*
1660     * For recursive read-locks we do all the dependency checks,
1661     * but we dont store read-triggered dependencies (only
1662     * write-triggered dependencies). This ensures that only the
1663     * write-side dependencies matter, and that if for example a
1664     * write-lock never takes any other locks, then the reads are
1665     * equivalent to a NOP.
1666     */
1667    if (next->read == 2 || prev->read == 2)
1668        return 1;
1669    /*
1670     * Is the <prev> -> <next> dependency already present?
1671     *
1672     * (this may occur even though this is a new chain: consider
1673     * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1674     * chains - the second one will be new, but L1 already has
1675     * L2 added to its dependency list, due to the first chain.)
1676     */
1677    list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1678        if (entry->class == hlock_class(next)) {
1679            if (distance == 1)
1680                entry->distance = 1;
1681            return 2;
1682        }
1683    }
1684
1685    if (!trylock_loop && !save_trace(&trace))
1686        return 0;
1687
1688    /*
1689     * Ok, all validations passed, add the new lock
1690     * to the previous lock's dependency list:
1691     */
1692    ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
1693                   &hlock_class(prev)->locks_after,
1694                   next->acquire_ip, distance, &trace);
1695
1696    if (!ret)
1697        return 0;
1698
1699    ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1700                   &hlock_class(next)->locks_before,
1701                   next->acquire_ip, distance, &trace);
1702    if (!ret)
1703        return 0;
1704
1705    /*
1706     * Debugging printouts:
1707     */
1708    if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1709        graph_unlock();
1710        printk("\n new dependency: ");
1711        print_lock_name(hlock_class(prev));
1712        printk(" => ");
1713        print_lock_name(hlock_class(next));
1714        printk("\n");
1715        dump_stack();
1716        return graph_lock();
1717    }
1718    return 1;
1719}
1720
1721/*
1722 * Add the dependency to all directly-previous locks that are 'relevant'.
1723 * The ones that are relevant are (in increasing distance from curr):
1724 * all consecutive trylock entries and the final non-trylock entry - or
1725 * the end of this context's lock-chain - whichever comes first.
1726 */
1727static int
1728check_prevs_add(struct task_struct *curr, struct held_lock *next)
1729{
1730    int depth = curr->lockdep_depth;
1731    int trylock_loop = 0;
1732    struct held_lock *hlock;
1733
1734    /*
1735     * Debugging checks.
1736     *
1737     * Depth must not be zero for a non-head lock:
1738     */
1739    if (!depth)
1740        goto out_bug;
1741    /*
1742     * At least two relevant locks must exist for this
1743     * to be a head:
1744     */
1745    if (curr->held_locks[depth].irq_context !=
1746            curr->held_locks[depth-1].irq_context)
1747        goto out_bug;
1748
1749    for (;;) {
1750        int distance = curr->lockdep_depth - depth + 1;
1751        hlock = curr->held_locks + depth-1;
1752        /*
1753         * Only non-recursive-read entries get new dependencies
1754         * added:
1755         */
1756        if (hlock->read != 2) {
1757            if (!check_prev_add(curr, hlock, next,
1758                        distance, trylock_loop))
1759                return 0;
1760            /*
1761             * Stop after the first non-trylock entry,
1762             * as non-trylock entries have added their
1763             * own direct dependencies already, so this
1764             * lock is connected to them indirectly:
1765             */
1766            if (!hlock->trylock)
1767                break;
1768        }
1769        depth--;
1770        /*
1771         * End of lock-stack?
1772         */
1773        if (!depth)
1774            break;
1775        /*
1776         * Stop the search if we cross into another context:
1777         */
1778        if (curr->held_locks[depth].irq_context !=
1779                curr->held_locks[depth-1].irq_context)
1780            break;
1781        trylock_loop = 1;
1782    }
1783    return 1;
1784out_bug:
1785    if (!debug_locks_off_graph_unlock())
1786        return 0;
1787
1788    WARN_ON(1);
1789
1790    return 0;
1791}
1792
1793unsigned long nr_lock_chains;
1794struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1795int nr_chain_hlocks;
1796static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1797
1798struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1799{
1800    return lock_classes + chain_hlocks[chain->base + i];
1801}
1802
1803/*
1804 * Look up a dependency chain. If the key is not present yet then
1805 * add it and return 1 - in this case the new dependency chain is
1806 * validated. If the key is already hashed, return 0.
1807 * (On return with 1 graph_lock is held.)
1808 */
1809static inline int lookup_chain_cache(struct task_struct *curr,
1810                     struct held_lock *hlock,
1811                     u64 chain_key)
1812{
1813    struct lock_class *class = hlock_class(hlock);
1814    struct list_head *hash_head = chainhashentry(chain_key);
1815    struct lock_chain *chain;
1816    struct held_lock *hlock_curr, *hlock_next;
1817    int i, j, n, cn;
1818
1819    if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1820        return 0;
1821    /*
1822     * We can walk it lock-free, because entries only get added
1823     * to the hash:
1824     */
1825    list_for_each_entry(chain, hash_head, entry) {
1826        if (chain->chain_key == chain_key) {
1827cache_hit:
1828            debug_atomic_inc(chain_lookup_hits);
1829            if (very_verbose(class))
1830                printk("\nhash chain already cached, key: "
1831                    "%016Lx tail class: [%p] %s\n",
1832                    (unsigned long long)chain_key,
1833                    class->key, class->name);
1834            return 0;
1835        }
1836    }
1837    if (very_verbose(class))
1838        printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
1839            (unsigned long long)chain_key, class->key, class->name);
1840    /*
1841     * Allocate a new chain entry from the static array, and add
1842     * it to the hash:
1843     */
1844    if (!graph_lock())
1845        return 0;
1846    /*
1847     * We have to walk the chain again locked - to avoid duplicates:
1848     */
1849    list_for_each_entry(chain, hash_head, entry) {
1850        if (chain->chain_key == chain_key) {
1851            graph_unlock();
1852            goto cache_hit;
1853        }
1854    }
1855    if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1856        if (!debug_locks_off_graph_unlock())
1857            return 0;
1858
1859        printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1860        printk("turning off the locking correctness validator.\n");
1861        dump_stack();
1862        return 0;
1863    }
1864    chain = lock_chains + nr_lock_chains++;
1865    chain->chain_key = chain_key;
1866    chain->irq_context = hlock->irq_context;
1867    /* Find the first held_lock of current chain */
1868    hlock_next = hlock;
1869    for (i = curr->lockdep_depth - 1; i >= 0; i--) {
1870        hlock_curr = curr->held_locks + i;
1871        if (hlock_curr->irq_context != hlock_next->irq_context)
1872            break;
1873        hlock_next = hlock;
1874    }
1875    i++;
1876    chain->depth = curr->lockdep_depth + 1 - i;
1877    cn = nr_chain_hlocks;
1878    while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
1879        n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
1880        if (n == cn)
1881            break;
1882        cn = n;
1883    }
1884    if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
1885        chain->base = cn;
1886        for (j = 0; j < chain->depth - 1; j++, i++) {
1887            int lock_id = curr->held_locks[i].class_idx - 1;
1888            chain_hlocks[chain->base + j] = lock_id;
1889        }
1890        chain_hlocks[chain->base + j] = class - lock_classes;
1891    }
1892    list_add_tail_rcu(&chain->entry, hash_head);
1893    debug_atomic_inc(chain_lookup_misses);
1894    inc_chains();
1895
1896    return 1;
1897}
1898
1899static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
1900        struct held_lock *hlock, int chain_head, u64 chain_key)
1901{
1902    /*
1903     * Trylock needs to maintain the stack of held locks, but it
1904     * does not add new dependencies, because trylock can be done
1905     * in any order.
1906     *
1907     * We look up the chain_key and do the O(N^2) check and update of
1908     * the dependencies only if this is a new dependency chain.
1909     * (If lookup_chain_cache() returns with 1 it acquires
1910     * graph_lock for us)
1911     */
1912    if (!hlock->trylock && (hlock->check == 2) &&
1913        lookup_chain_cache(curr, hlock, chain_key)) {
1914        /*
1915         * Check whether last held lock:
1916         *
1917         * - is irq-safe, if this lock is irq-unsafe
1918         * - is softirq-safe, if this lock is hardirq-unsafe
1919         *
1920         * And check whether the new lock's dependency graph
1921         * could lead back to the previous lock.
1922         *
1923         * any of these scenarios could lead to a deadlock. If
1924         * All validations
1925         */
1926        int ret = check_deadlock(curr, hlock, lock, hlock->read);
1927
1928        if (!ret)
1929            return 0;
1930        /*
1931         * Mark recursive read, as we jump over it when
1932         * building dependencies (just like we jump over
1933         * trylock entries):
1934         */
1935        if (ret == 2)
1936            hlock->read = 2;
1937        /*
1938         * Add dependency only if this lock is not the head
1939         * of the chain, and if it's not a secondary read-lock:
1940         */
1941        if (!chain_head && ret != 2)
1942            if (!check_prevs_add(curr, hlock))
1943                return 0;
1944        graph_unlock();
1945    } else
1946        /* after lookup_chain_cache(): */
1947        if (unlikely(!debug_locks))
1948            return 0;
1949
1950    return 1;
1951}
1952#else
1953static inline int validate_chain(struct task_struct *curr,
1954               struct lockdep_map *lock, struct held_lock *hlock,
1955        int chain_head, u64 chain_key)
1956{
1957    return 1;
1958}
1959#endif
1960
1961/*
1962 * We are building curr_chain_key incrementally, so double-check
1963 * it from scratch, to make sure that it's done correctly:
1964 */
1965static void check_chain_key(struct task_struct *curr)
1966{
1967#ifdef CONFIG_DEBUG_LOCKDEP
1968    struct held_lock *hlock, *prev_hlock = NULL;
1969    unsigned int i, id;
1970    u64 chain_key = 0;
1971
1972    for (i = 0; i < curr->lockdep_depth; i++) {
1973        hlock = curr->held_locks + i;
1974        if (chain_key != hlock->prev_chain_key) {
1975            debug_locks_off();
1976            WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
1977                curr->lockdep_depth, i,
1978                (unsigned long long)chain_key,
1979                (unsigned long long)hlock->prev_chain_key);
1980            return;
1981        }
1982        id = hlock->class_idx - 1;
1983        if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
1984            return;
1985
1986        if (prev_hlock && (prev_hlock->irq_context !=
1987                            hlock->irq_context))
1988            chain_key = 0;
1989        chain_key = iterate_chain_key(chain_key, id);
1990        prev_hlock = hlock;
1991    }
1992    if (chain_key != curr->curr_chain_key) {
1993        debug_locks_off();
1994        WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
1995            curr->lockdep_depth, i,
1996            (unsigned long long)chain_key,
1997            (unsigned long long)curr->curr_chain_key);
1998    }
1999#endif
2000}
2001
2002static int
2003print_usage_bug(struct task_struct *curr, struct held_lock *this,
2004        enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2005{
2006    if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2007        return 0;
2008
2009    printk("\n=================================\n");
2010    printk( "[ INFO: inconsistent lock state ]\n");
2011    print_kernel_version();
2012    printk( "---------------------------------\n");
2013
2014    printk("inconsistent {%s} -> {%s} usage.\n",
2015        usage_str[prev_bit], usage_str[new_bit]);
2016
2017    printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2018        curr->comm, task_pid_nr(curr),
2019        trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2020        trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2021        trace_hardirqs_enabled(curr),
2022        trace_softirqs_enabled(curr));
2023    print_lock(this);
2024
2025    printk("{%s} state was registered at:\n", usage_str[prev_bit]);
2026    print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2027
2028    print_irqtrace_events(curr);
2029    printk("\nother info that might help us debug this:\n");
2030    lockdep_print_held_locks(curr);
2031
2032    printk("\nstack backtrace:\n");
2033    dump_stack();
2034
2035    return 0;
2036}
2037
2038/*
2039 * Print out an error if an invalid bit is set:
2040 */
2041static inline int
2042valid_state(struct task_struct *curr, struct held_lock *this,
2043        enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2044{
2045    if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2046        return print_usage_bug(curr, this, bad_bit, new_bit);
2047    return 1;
2048}
2049
2050static int mark_lock(struct task_struct *curr, struct held_lock *this,
2051             enum lock_usage_bit new_bit);
2052
2053#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2054
2055/*
2056 * print irq inversion bug:
2057 */
2058static int
2059print_irq_inversion_bug(struct task_struct *curr,
2060            struct lock_list *root, struct lock_list *other,
2061            struct held_lock *this, int forwards,
2062            const char *irqclass)
2063{
2064    if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2065        return 0;
2066
2067    printk("\n=========================================================\n");
2068    printk( "[ INFO: possible irq lock inversion dependency detected ]\n");
2069    print_kernel_version();
2070    printk( "---------------------------------------------------------\n");
2071    printk("%s/%d just changed the state of lock:\n",
2072        curr->comm, task_pid_nr(curr));
2073    print_lock(this);
2074    if (forwards)
2075        printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2076    else
2077        printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2078    print_lock_name(other->class);
2079    printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2080
2081    printk("\nother info that might help us debug this:\n");
2082    lockdep_print_held_locks(curr);
2083
2084    printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2085    if (!save_trace(&root->trace))
2086        return 0;
2087    print_shortest_lock_dependencies(other, root);
2088
2089    printk("\nstack backtrace:\n");
2090    dump_stack();
2091
2092    return 0;
2093}
2094
2095/*
2096 * Prove that in the forwards-direction subgraph starting at <this>
2097 * there is no lock matching <mask>:
2098 */
2099static int
2100check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2101             enum lock_usage_bit bit, const char *irqclass)
2102{
2103    int ret;
2104    struct lock_list root;
2105    struct lock_list *uninitialized_var(target_entry);
2106
2107    root.parent = NULL;
2108    root.class = hlock_class(this);
2109    ret = find_usage_forwards(&root, bit, &target_entry);
2110    if (ret < 0)
2111        return print_bfs_bug(ret);
2112    if (ret == 1)
2113        return ret;
2114
2115    return print_irq_inversion_bug(curr, &root, target_entry,
2116                    this, 1, irqclass);
2117}
2118
2119/*
2120 * Prove that in the backwards-direction subgraph starting at <this>
2121 * there is no lock matching <mask>:
2122 */
2123static int
2124check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2125              enum lock_usage_bit bit, const char *irqclass)
2126{
2127    int ret;
2128    struct lock_list root;
2129    struct lock_list *uninitialized_var(target_entry);
2130
2131    root.parent = NULL;
2132    root.class = hlock_class(this);
2133    ret = find_usage_backwards(&root, bit, &target_entry);
2134    if (ret < 0)
2135        return print_bfs_bug(ret);
2136    if (ret == 1)
2137        return ret;
2138
2139    return print_irq_inversion_bug(curr, &root, target_entry,
2140                    this, 0, irqclass);
2141}
2142
2143void print_irqtrace_events(struct task_struct *curr)
2144{
2145    printk("irq event stamp: %u\n", curr->irq_events);
2146    printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
2147    print_ip_sym(curr->hardirq_enable_ip);
2148    printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
2149    print_ip_sym(curr->hardirq_disable_ip);
2150    printk("softirqs last enabled at (%u): ", curr->softirq_enable_event);
2151    print_ip_sym(curr->softirq_enable_ip);
2152    printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
2153    print_ip_sym(curr->softirq_disable_ip);
2154}
2155
2156static int HARDIRQ_verbose(struct lock_class *class)
2157{
2158#if HARDIRQ_VERBOSE
2159    return class_filter(class);
2160#endif
2161    return 0;
2162}
2163
2164static int SOFTIRQ_verbose(struct lock_class *class)
2165{
2166#if SOFTIRQ_VERBOSE
2167    return class_filter(class);
2168#endif
2169    return 0;
2170}
2171
2172static int RECLAIM_FS_verbose(struct lock_class *class)
2173{
2174#if RECLAIM_VERBOSE
2175    return class_filter(class);
2176#endif
2177    return 0;
2178}
2179
2180#define STRICT_READ_CHECKS 1
2181
2182static int (*state_verbose_f[])(struct lock_class *class) = {
2183#define LOCKDEP_STATE(__STATE) \
2184    __STATE##_verbose,
2185#include "lockdep_states.h"
2186#undef LOCKDEP_STATE
2187};
2188
2189static inline int state_verbose(enum lock_usage_bit bit,
2190                struct lock_class *class)
2191{
2192    return state_verbose_f[bit >> 2](class);
2193}
2194
2195typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2196                 enum lock_usage_bit bit, const char *name);
2197
2198static int
2199mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2200        enum lock_usage_bit new_bit)
2201{
2202    int excl_bit = exclusive_bit(new_bit);
2203    int read = new_bit & 1;
2204    int dir = new_bit & 2;
2205
2206    /*
2207     * mark USED_IN has to look forwards -- to ensure no dependency
2208     * has ENABLED state, which would allow recursion deadlocks.
2209     *
2210     * mark ENABLED has to look backwards -- to ensure no dependee
2211     * has USED_IN state, which, again, would allow recursion deadlocks.
2212     */
2213    check_usage_f usage = dir ?
2214        check_usage_backwards : check_usage_forwards;
2215
2216    /*
2217     * Validate that this particular lock does not have conflicting
2218     * usage states.
2219     */
2220    if (!valid_state(curr, this, new_bit, excl_bit))
2221        return 0;
2222
2223    /*
2224     * Validate that the lock dependencies don't have conflicting usage
2225     * states.
2226     */
2227    if ((!read || !dir || STRICT_READ_CHECKS) &&
2228            !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2229        return 0;
2230
2231    /*
2232     * Check for read in write conflicts
2233     */
2234    if (!read) {
2235        if (!valid_state(curr, this, new_bit, excl_bit + 1))
2236            return 0;
2237
2238        if (STRICT_READ_CHECKS &&
2239            !usage(curr, this, excl_bit + 1,
2240                state_name(new_bit + 1)))
2241            return 0;
2242    }
2243
2244    if (state_verbose(new_bit, hlock_class(this)))
2245        return 2;
2246
2247    return 1;
2248}
2249
2250enum mark_type {
2251#define LOCKDEP_STATE(__STATE) __STATE,
2252#include "lockdep_states.h"
2253#undef LOCKDEP_STATE
2254};
2255
2256/*
2257 * Mark all held locks with a usage bit:
2258 */
2259static int
2260mark_held_locks(struct task_struct *curr, enum mark_type mark)
2261{
2262    enum lock_usage_bit usage_bit;
2263    struct held_lock *hlock;
2264    int i;
2265
2266    for (i = 0; i < curr->lockdep_depth; i++) {
2267        hlock = curr->held_locks + i;
2268
2269        usage_bit = 2 + (mark << 2); /* ENABLED */
2270        if (hlock->read)
2271            usage_bit += 1; /* READ */
2272
2273        BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2274
2275        if (!mark_lock(curr, hlock, usage_bit))
2276            return 0;
2277    }
2278
2279    return 1;
2280}
2281
2282/*
2283 * Debugging helper: via this flag we know that we are in
2284 * 'early bootup code', and will warn about any invalid irqs-on event:
2285 */
2286static int early_boot_irqs_enabled;
2287
2288void early_boot_irqs_off(void)
2289{
2290    early_boot_irqs_enabled = 0;
2291}
2292
2293void early_boot_irqs_on(void)
2294{
2295    early_boot_irqs_enabled = 1;
2296}
2297
2298/*
2299 * Hardirqs will be enabled:
2300 */
2301void trace_hardirqs_on_caller(unsigned long ip)
2302{
2303    struct task_struct *curr = current;
2304
2305    time_hardirqs_on(CALLER_ADDR0, ip);
2306
2307    if (unlikely(!debug_locks || current->lockdep_recursion))
2308        return;
2309
2310    if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
2311        return;
2312
2313    if (unlikely(curr->hardirqs_enabled)) {
2314        /*
2315         * Neither irq nor preemption are disabled here
2316         * so this is racy by nature but loosing one hit
2317         * in a stat is not a big deal.
2318         */
2319        __debug_atomic_inc(redundant_hardirqs_on);
2320        return;
2321    }
2322    /* we'll do an OFF -> ON transition: */
2323    curr->hardirqs_enabled = 1;
2324
2325    if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2326        return;
2327    if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2328        return;
2329    /*
2330     * We are going to turn hardirqs on, so set the
2331     * usage bit for all held locks:
2332     */
2333    if (!mark_held_locks(curr, HARDIRQ))
2334        return;
2335    /*
2336     * If we have softirqs enabled, then set the usage
2337     * bit for all held locks. (disabled hardirqs prevented
2338     * this bit from being set before)
2339     */
2340    if (curr->softirqs_enabled)
2341        if (!mark_held_locks(curr, SOFTIRQ))
2342            return;
2343
2344    curr->hardirq_enable_ip = ip;
2345    curr->hardirq_enable_event = ++curr->irq_events;
2346    debug_atomic_inc(hardirqs_on_events);
2347}
2348EXPORT_SYMBOL(trace_hardirqs_on_caller);
2349
2350void trace_hardirqs_on(void)
2351{
2352    trace_hardirqs_on_caller(CALLER_ADDR0);
2353}
2354EXPORT_SYMBOL(trace_hardirqs_on);
2355
2356/*
2357 * Hardirqs were disabled:
2358 */
2359void trace_hardirqs_off_caller(unsigned long ip)
2360{
2361    struct task_struct *curr = current;
2362
2363    time_hardirqs_off(CALLER_ADDR0, ip);
2364
2365    if (unlikely(!debug_locks || current->lockdep_recursion))
2366        return;
2367
2368    if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2369        return;
2370
2371    if (curr->hardirqs_enabled) {
2372        /*
2373         * We have done an ON -> OFF transition:
2374         */
2375        curr->hardirqs_enabled = 0;
2376        curr->hardirq_disable_ip = ip;
2377        curr->hardirq_disable_event = ++curr->irq_events;
2378        debug_atomic_inc(hardirqs_off_events);
2379    } else
2380        debug_atomic_inc(redundant_hardirqs_off);
2381}
2382EXPORT_SYMBOL(trace_hardirqs_off_caller);
2383
2384void trace_hardirqs_off(void)
2385{
2386    trace_hardirqs_off_caller(CALLER_ADDR0);
2387}
2388EXPORT_SYMBOL(trace_hardirqs_off);
2389
2390/*
2391 * Softirqs will be enabled:
2392 */
2393void trace_softirqs_on(unsigned long ip)
2394{
2395    struct task_struct *curr = current;
2396
2397    if (unlikely(!debug_locks))
2398        return;
2399
2400    if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2401        return;
2402
2403    if (curr->softirqs_enabled) {
2404        debug_atomic_inc(redundant_softirqs_on);
2405        return;
2406    }
2407
2408    /*
2409     * We'll do an OFF -> ON transition:
2410     */
2411    curr->softirqs_enabled = 1;
2412    curr->softirq_enable_ip = ip;
2413    curr->softirq_enable_event = ++curr->irq_events;
2414    debug_atomic_inc(softirqs_on_events);
2415    /*
2416     * We are going to turn softirqs on, so set the
2417     * usage bit for all held locks, if hardirqs are
2418     * enabled too:
2419     */
2420    if (curr->hardirqs_enabled)
2421        mark_held_locks(curr, SOFTIRQ);
2422}
2423
2424/*
2425 * Softirqs were disabled:
2426 */
2427void trace_softirqs_off(unsigned long ip)
2428{
2429    struct task_struct *curr = current;
2430
2431    if (unlikely(!debug_locks))
2432        return;
2433
2434    if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2435        return;
2436
2437    if (curr->softirqs_enabled) {
2438        /*
2439         * We have done an ON -> OFF transition:
2440         */
2441        curr->softirqs_enabled = 0;
2442        curr->softirq_disable_ip = ip;
2443        curr->softirq_disable_event = ++curr->irq_events;
2444        debug_atomic_inc(softirqs_off_events);
2445        DEBUG_LOCKS_WARN_ON(!softirq_count());
2446    } else
2447        debug_atomic_inc(redundant_softirqs_off);
2448}
2449
2450static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
2451{
2452    struct task_struct *curr = current;
2453
2454    if (unlikely(!debug_locks))
2455        return;
2456
2457    /* no reclaim without waiting on it */
2458    if (!(gfp_mask & __GFP_WAIT))
2459        return;
2460
2461    /* this guy won't enter reclaim */
2462    if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2463        return;
2464
2465    /* We're only interested __GFP_FS allocations for now */
2466    if (!(gfp_mask & __GFP_FS))
2467        return;
2468
2469    if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
2470        return;
2471
2472    mark_held_locks(curr, RECLAIM_FS);
2473}
2474
2475static void check_flags(unsigned long flags);
2476
2477void lockdep_trace_alloc(gfp_t gfp_mask)
2478{
2479    unsigned long flags;
2480
2481    if (unlikely(current->lockdep_recursion))
2482        return;
2483
2484    raw_local_irq_save(flags);
2485    check_flags(flags);
2486    current->lockdep_recursion = 1;
2487    __lockdep_trace_alloc(gfp_mask, flags);
2488    current->lockdep_recursion = 0;
2489    raw_local_irq_restore(flags);
2490}
2491
2492static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2493{
2494    /*
2495     * If non-trylock use in a hardirq or softirq context, then
2496     * mark the lock as used in these contexts:
2497     */
2498    if (!hlock->trylock) {
2499        if (hlock->read) {
2500            if (curr->hardirq_context)
2501                if (!mark_lock(curr, hlock,
2502                        LOCK_USED_IN_HARDIRQ_READ))
2503                    return 0;
2504            if (curr->softirq_context)
2505                if (!mark_lock(curr, hlock,
2506                        LOCK_USED_IN_SOFTIRQ_READ))
2507                    return 0;
2508        } else {
2509            if (curr->hardirq_context)
2510                if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2511                    return 0;
2512            if (curr->softirq_context)
2513                if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2514                    return 0;
2515        }
2516    }
2517    if (!hlock->hardirqs_off) {
2518        if (hlock->read) {
2519            if (!mark_lock(curr, hlock,
2520                    LOCK_ENABLED_HARDIRQ_READ))
2521                return 0;
2522            if (curr->softirqs_enabled)
2523                if (!mark_lock(curr, hlock,
2524                        LOCK_ENABLED_SOFTIRQ_READ))
2525                    return 0;
2526        } else {
2527            if (!mark_lock(curr, hlock,
2528                    LOCK_ENABLED_HARDIRQ))
2529                return 0;
2530            if (curr->softirqs_enabled)
2531                if (!mark_lock(curr, hlock,
2532                        LOCK_ENABLED_SOFTIRQ))
2533                    return 0;
2534        }
2535    }
2536
2537    /*
2538     * We reuse the irq context infrastructure more broadly as a general
2539     * context checking code. This tests GFP_FS recursion (a lock taken
2540     * during reclaim for a GFP_FS allocation is held over a GFP_FS
2541     * allocation).
2542     */
2543    if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
2544        if (hlock->read) {
2545            if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
2546                    return 0;
2547        } else {
2548            if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
2549                    return 0;
2550        }
2551    }
2552
2553    return 1;
2554}
2555
2556static int separate_irq_context(struct task_struct *curr,
2557        struct held_lock *hlock)
2558{
2559    unsigned int depth = curr->lockdep_depth;
2560
2561    /*
2562     * Keep track of points where we cross into an interrupt context:
2563     */
2564    hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2565                curr->softirq_context;
2566    if (depth) {
2567        struct held_lock *prev_hlock;
2568
2569        prev_hlock = curr->held_locks + depth-1;
2570        /*
2571         * If we cross into another context, reset the
2572         * hash key (this also prevents the checking and the
2573         * adding of the dependency to 'prev'):
2574         */
2575        if (prev_hlock->irq_context != hlock->irq_context)
2576            return 1;
2577    }
2578    return 0;
2579}
2580
2581#else
2582
2583static inline
2584int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2585        enum lock_usage_bit new_bit)
2586{
2587    WARN_ON(1);
2588    return 1;
2589}
2590
2591static inline int mark_irqflags(struct task_struct *curr,
2592        struct held_lock *hlock)
2593{
2594    return 1;
2595}
2596
2597static inline int separate_irq_context(struct task_struct *curr,
2598        struct held_lock *hlock)
2599{
2600    return 0;
2601}
2602
2603void lockdep_trace_alloc(gfp_t gfp_mask)
2604{
2605}
2606
2607#endif
2608
2609/*
2610 * Mark a lock with a usage bit, and validate the state transition:
2611 */
2612static int mark_lock(struct task_struct *curr, struct held_lock *this,
2613                 enum lock_usage_bit new_bit)
2614{
2615    unsigned int new_mask = 1 << new_bit, ret = 1;
2616
2617    /*
2618     * If already set then do not dirty the cacheline,
2619     * nor do any checks:
2620     */
2621    if (likely(hlock_class(this)->usage_mask & new_mask))
2622        return 1;
2623
2624    if (!graph_lock())
2625        return 0;
2626    /*
2627     * Make sure we didnt race:
2628     */
2629    if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
2630        graph_unlock();
2631        return 1;
2632    }
2633
2634    hlock_class(this)->usage_mask |= new_mask;
2635
2636    if (!save_trace(hlock_class(this)->usage_traces + new_bit))
2637        return 0;
2638
2639    switch (new_bit) {
2640#define LOCKDEP_STATE(__STATE) \
2641    case LOCK_USED_IN_##__STATE: \
2642    case LOCK_USED_IN_##__STATE##_READ: \
2643    case LOCK_ENABLED_##__STATE: \
2644    case LOCK_ENABLED_##__STATE##_READ:
2645#include "lockdep_states.h"
2646#undef LOCKDEP_STATE
2647        ret = mark_lock_irq(curr, this, new_bit);
2648        if (!ret)
2649            return 0;
2650        break;
2651    case LOCK_USED:
2652        debug_atomic_dec(nr_unused_locks);
2653        break;
2654    default:
2655        if (!debug_locks_off_graph_unlock())
2656            return 0;
2657        WARN_ON(1);
2658        return 0;
2659    }
2660
2661    graph_unlock();
2662
2663    /*
2664     * We must printk outside of the graph_lock:
2665     */
2666    if (ret == 2) {
2667        printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
2668        print_lock(this);
2669        print_irqtrace_events(curr);
2670        dump_stack();
2671    }
2672
2673    return ret;
2674}
2675
2676/*
2677 * Initialize a lock instance's lock-class mapping info:
2678 */
2679void lockdep_init_map(struct lockdep_map *lock, const char *name,
2680              struct lock_class_key *key, int subclass)
2681{
2682    lock->class_cache = NULL;
2683#ifdef CONFIG_LOCK_STAT
2684    lock->cpu = raw_smp_processor_id();
2685#endif
2686
2687    if (DEBUG_LOCKS_WARN_ON(!name)) {
2688        lock->name = "NULL";
2689        return;
2690    }
2691
2692    lock->name = name;
2693
2694    if (DEBUG_LOCKS_WARN_ON(!key))
2695        return;
2696    /*
2697     * Sanity check, the lock-class key must be persistent:
2698     */
2699    if (!static_obj(key)) {
2700        printk("BUG: key %p not in .data!\n", key);
2701        DEBUG_LOCKS_WARN_ON(1);
2702        return;
2703    }
2704    lock->key = key;
2705
2706    if (unlikely(!debug_locks))
2707        return;
2708
2709    if (subclass)
2710        register_lock_class(lock, subclass, 1);
2711}
2712EXPORT_SYMBOL_GPL(lockdep_init_map);
2713
2714struct lock_class_key __lockdep_no_validate__;
2715
2716/*
2717 * This gets called for every mutex_lock*()/spin_lock*() operation.
2718 * We maintain the dependency maps and validate the locking attempt:
2719 */
2720static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2721              int trylock, int read, int check, int hardirqs_off,
2722              struct lockdep_map *nest_lock, unsigned long ip,
2723              int references)
2724{
2725    struct task_struct *curr = current;
2726    struct lock_class *class = NULL;
2727    struct held_lock *hlock;
2728    unsigned int depth, id;
2729    int chain_head = 0;
2730    int class_idx;
2731    u64 chain_key;
2732
2733    if (!prove_locking)
2734        check = 1;
2735
2736    if (unlikely(!debug_locks))
2737        return 0;
2738
2739    if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2740        return 0;
2741
2742    if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
2743        debug_locks_off();
2744        printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
2745        printk("turning off the locking correctness validator.\n");
2746        dump_stack();
2747        return 0;
2748    }
2749
2750    if (lock->key == &__lockdep_no_validate__)
2751        check = 1;
2752
2753    if (!subclass)
2754        class = lock->class_cache;
2755    /*
2756     * Not cached yet or subclass?
2757     */
2758    if (unlikely(!class)) {
2759        class = register_lock_class(lock, subclass, 0);
2760        if (!class)
2761            return 0;
2762    }
2763    atomic_inc((atomic_t *)&class->ops);
2764    if (very_verbose(class)) {
2765        printk("\nacquire class [%p] %s", class->key, class->name);
2766        if (class->name_version > 1)
2767            printk("#%d", class->name_version);
2768        printk("\n");
2769        dump_stack();
2770    }
2771
2772    /*
2773     * Add the lock to the list of currently held locks.
2774     * (we dont increase the depth just yet, up until the
2775     * dependency checks are done)
2776     */
2777    depth = curr->lockdep_depth;
2778    if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2779        return 0;
2780
2781    class_idx = class - lock_classes + 1;
2782
2783    if (depth) {
2784        hlock = curr->held_locks + depth - 1;
2785        if (hlock->class_idx == class_idx && nest_lock) {
2786            if (hlock->references)
2787                hlock->references++;
2788            else
2789                hlock->references = 2;
2790
2791            return 1;
2792        }
2793    }
2794
2795    hlock = curr->held_locks + depth;
2796    if (DEBUG_LOCKS_WARN_ON(!class))
2797        return 0;
2798    hlock->class_idx = class_idx;
2799    hlock->acquire_ip = ip;
2800    hlock->instance = lock;
2801    hlock->nest_lock = nest_lock;
2802    hlock->trylock = trylock;
2803    hlock->read = read;
2804    hlock->check = check;
2805    hlock->hardirqs_off = !!hardirqs_off;
2806    hlock->references = references;
2807#ifdef CONFIG_LOCK_STAT
2808    hlock->waittime_stamp = 0;
2809    hlock->holdtime_stamp = lockstat_clock();
2810#endif
2811
2812    if (check == 2 && !mark_irqflags(curr, hlock))
2813        return 0;
2814
2815    /* mark it as used: */
2816    if (!mark_lock(curr, hlock, LOCK_USED))
2817        return 0;
2818
2819    /*
2820     * Calculate the chain hash: it's the combined hash of all the
2821     * lock keys along the dependency chain. We save the hash value
2822     * at every step so that we can get the current hash easily
2823     * after unlock. The chain hash is then used to cache dependency
2824     * results.
2825     *
2826     * The 'key ID' is what is the most compact key value to drive
2827     * the hash, not class->key.
2828     */
2829    id = class - lock_classes;
2830    if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2831        return 0;
2832
2833    chain_key = curr->curr_chain_key;
2834    if (!depth) {
2835        if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
2836            return 0;
2837        chain_head = 1;
2838    }
2839
2840    hlock->prev_chain_key = chain_key;
2841    if (separate_irq_context(curr, hlock)) {
2842        chain_key = 0;
2843        chain_head = 1;
2844    }
2845    chain_key = iterate_chain_key(chain_key, id);
2846
2847    if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
2848        return 0;
2849
2850    curr->curr_chain_key = chain_key;
2851    curr->lockdep_depth++;
2852    check_chain_key(curr);
2853#ifdef CONFIG_DEBUG_LOCKDEP
2854    if (unlikely(!debug_locks))
2855        return 0;
2856#endif
2857    if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
2858        debug_locks_off();
2859        printk("BUG: MAX_LOCK_DEPTH too low!\n");
2860        printk("turning off the locking correctness validator.\n");
2861        dump_stack();
2862        return 0;
2863    }
2864
2865    if (unlikely(curr->lockdep_depth > max_lockdep_depth))
2866        max_lockdep_depth = curr->lockdep_depth;
2867
2868    return 1;
2869}
2870
2871static int
2872print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
2873               unsigned long ip)
2874{
2875    if (!debug_locks_off())
2876        return 0;
2877    if (debug_locks_silent)
2878        return 0;
2879
2880    printk("\n=====================================\n");
2881    printk( "[ BUG: bad unlock balance detected! ]\n");
2882    printk( "-------------------------------------\n");
2883    printk("%s/%d is trying to release lock (",
2884        curr->comm, task_pid_nr(curr));
2885    print_lockdep_cache(lock);
2886    printk(") at:\n");
2887    print_ip_sym(ip);
2888    printk("but there are no more locks to release!\n");
2889    printk("\nother info that might help us debug this:\n");
2890    lockdep_print_held_locks(curr);
2891
2892    printk("\nstack backtrace:\n");
2893    dump_stack();
2894
2895    return 0;
2896}
2897
2898/*
2899 * Common debugging checks for both nested and non-nested unlock:
2900 */
2901static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2902            unsigned long ip)
2903{
2904    if (unlikely(!debug_locks))
2905        return 0;
2906    if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2907        return 0;
2908
2909    if (curr->lockdep_depth <= 0)
2910        return print_unlock_inbalance_bug(curr, lock, ip);
2911
2912    return 1;
2913}
2914
2915static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
2916{
2917    if (hlock->instance == lock)
2918        return 1;
2919
2920    if (hlock->references) {
2921        struct lock_class *class = lock->class_cache;
2922
2923        if (!class)
2924            class = look_up_lock_class(lock, 0);
2925
2926        if (DEBUG_LOCKS_WARN_ON(!class))
2927            return 0;
2928
2929        if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
2930            return 0;
2931
2932        if (hlock->class_idx == class - lock_classes + 1)
2933            return 1;
2934    }
2935
2936    return 0;
2937}
2938
2939static int
2940__lock_set_class(struct lockdep_map *lock, const char *name,
2941         struct lock_class_key *key, unsigned int subclass,
2942         unsigned long ip)
2943{
2944    struct task_struct *curr = current;
2945    struct held_lock *hlock, *prev_hlock;
2946    struct lock_class *class;
2947    unsigned int depth;
2948    int i;
2949
2950    depth = curr->lockdep_depth;
2951    if (DEBUG_LOCKS_WARN_ON(!depth))
2952        return 0;
2953
2954    prev_hlock = NULL;
2955    for (i = depth-1; i >= 0; i--) {
2956        hlock = curr->held_locks + i;
2957        /*
2958         * We must not cross into another context:
2959         */
2960        if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2961            break;
2962        if (match_held_lock(hlock, lock))
2963            goto found_it;
2964        prev_hlock = hlock;
2965    }
2966    return print_unlock_inbalance_bug(curr, lock, ip);
2967
2968found_it:
2969    lockdep_init_map(lock, name, key, 0);
2970    class = register_lock_class(lock, subclass, 0);
2971    hlock->class_idx = class - lock_classes + 1;
2972
2973    curr->lockdep_depth = i;
2974    curr->curr_chain_key = hlock->prev_chain_key;
2975
2976    for (; i < depth; i++) {
2977        hlock = curr->held_locks + i;
2978        if (!__lock_acquire(hlock->instance,
2979            hlock_class(hlock)->subclass, hlock->trylock,
2980                hlock->read, hlock->check, hlock->hardirqs_off,
2981                hlock->nest_lock, hlock->acquire_ip,
2982                hlock->references))
2983            return 0;
2984    }
2985
2986    if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
2987        return 0;
2988    return 1;
2989}
2990
2991/*
2992 * Remove the lock to the list of currently held locks in a
2993 * potentially non-nested (out of order) manner. This is a
2994 * relatively rare operation, as all the unlock APIs default
2995 * to nested mode (which uses lock_release()):
2996 */
2997static int
2998lock_release_non_nested(struct task_struct *curr,
2999            struct lockdep_map *lock, unsigned long ip)
3000{
3001    struct held_lock *hlock, *prev_hlock;
3002    unsigned int depth;
3003    int i;
3004
3005    /*
3006     * Check whether the lock exists in the current stack
3007     * of held locks:
3008     */
3009    depth = curr->lockdep_depth;
3010    if (DEBUG_LOCKS_WARN_ON(!depth))
3011        return 0;
3012
3013    prev_hlock = NULL;
3014    for (i = depth-1; i >= 0; i--) {
3015        hlock = curr->held_locks + i;
3016        /*
3017         * We must not cross into another context:
3018         */
3019        if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3020            break;
3021        if (match_held_lock(hlock, lock))
3022            goto found_it;
3023        prev_hlock = hlock;
3024    }
3025    return print_unlock_inbalance_bug(curr, lock, ip);
3026
3027found_it:
3028    if (hlock->instance == lock)
3029        lock_release_holdtime(hlock);
3030
3031    if (hlock->references) {
3032        hlock->references--;
3033        if (hlock->references) {
3034            /*
3035             * We had, and after removing one, still have
3036             * references, the current lock stack is still
3037             * valid. We're done!
3038             */
3039            return 1;
3040        }
3041    }
3042
3043    /*
3044     * We have the right lock to unlock, 'hlock' points to it.
3045     * Now we remove it from the stack, and add back the other
3046     * entries (if any), recalculating the hash along the way:
3047     */
3048
3049    curr->lockdep_depth = i;
3050    curr->curr_chain_key = hlock->prev_chain_key;
3051
3052    for (i++; i < depth; i++) {
3053        hlock = curr->held_locks + i;
3054        if (!__lock_acquire(hlock->instance,
3055            hlock_class(hlock)->subclass, hlock->trylock,
3056                hlock->read, hlock->check, hlock->hardirqs_off,
3057                hlock->nest_lock, hlock->acquire_ip,
3058                hlock->references))
3059            return 0;
3060    }
3061
3062    if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
3063        return 0;
3064    return 1;
3065}
3066
3067/*
3068 * Remove the lock to the list of currently held locks - this gets
3069 * called on mutex_unlock()/spin_unlock*() (or on a failed
3070 * mutex_lock_interruptible()). This is done for unlocks that nest
3071 * perfectly. (i.e. the current top of the lock-stack is unlocked)
3072 */
3073static int lock_release_nested(struct task_struct *curr,
3074                   struct lockdep_map *lock, unsigned long ip)
3075{
3076    struct held_lock *hlock;
3077    unsigned int depth;
3078
3079    /*
3080     * Pop off the top of the lock stack:
3081     */
3082    depth = curr->lockdep_depth - 1;
3083    hlock = curr->held_locks + depth;
3084
3085    /*
3086     * Is the unlock non-nested:
3087     */
3088    if (hlock->instance != lock || hlock->references)
3089        return lock_release_non_nested(curr, lock, ip);
3090    curr->lockdep_depth--;
3091
3092    if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
3093        return 0;
3094
3095    curr->curr_chain_key = hlock->prev_chain_key;
3096
3097    lock_release_holdtime(hlock);
3098
3099#ifdef CONFIG_DEBUG_LOCKDEP
3100    hlock->prev_chain_key = 0;
3101    hlock->class_idx = 0;
3102    hlock->acquire_ip = 0;
3103    hlock->irq_context = 0;
3104#endif
3105    return 1;
3106}
3107
3108/*
3109 * Remove the lock to the list of currently held locks - this gets
3110 * called on mutex_unlock()/spin_unlock*() (or on a failed
3111 * mutex_lock_interruptible()). This is done for unlocks that nest
3112 * perfectly. (i.e. the current top of the lock-stack is unlocked)
3113 */
3114static void
3115__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3116{
3117    struct task_struct *curr = current;
3118
3119    if (!check_unlock(curr, lock, ip))
3120        return;
3121
3122    if (nested) {
3123        if (!lock_release_nested(curr, lock, ip))
3124            return;
3125    } else {
3126        if (!lock_release_non_nested(curr, lock, ip))
3127            return;
3128    }
3129
3130    check_chain_key(curr);
3131}
3132
3133static int __lock_is_held(struct lockdep_map *lock)
3134{
3135    struct task_struct *curr = current;
3136    int i;
3137
3138    for (i = 0; i < curr->lockdep_depth; i++) {
3139        struct held_lock *hlock = curr->held_locks + i;
3140
3141        if (match_held_lock(hlock, lock))
3142            return 1;
3143    }
3144
3145    return 0;
3146}
3147
3148/*
3149 * Check whether we follow the irq-flags state precisely:
3150 */
3151static void check_flags(unsigned long flags)
3152{
3153#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
3154    defined(CONFIG_TRACE_IRQFLAGS)
3155    if (!debug_locks)
3156        return;
3157
3158    if (irqs_disabled_flags(flags)) {
3159        if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
3160            printk("possible reason: unannotated irqs-off.\n");
3161        }
3162    } else {
3163        if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
3164            printk("possible reason: unannotated irqs-on.\n");
3165        }
3166    }
3167
3168    /*
3169     * We dont accurately track softirq state in e.g.
3170     * hardirq contexts (such as on 4KSTACKS), so only
3171     * check if not in hardirq contexts:
3172     */
3173    if (!hardirq_count()) {
3174        if (softirq_count())
3175            DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
3176        else
3177            DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
3178    }
3179
3180    if (!debug_locks)
3181        print_irqtrace_events(current);
3182#endif
3183}
3184
3185void lock_set_class(struct lockdep_map *lock, const char *name,
3186            struct lock_class_key *key, unsigned int subclass,
3187            unsigned long ip)
3188{
3189    unsigned long flags;
3190
3191    if (unlikely(current->lockdep_recursion))
3192        return;
3193
3194    raw_local_irq_save(flags);
3195    current->lockdep_recursion = 1;
3196    check_flags(flags);
3197    if (__lock_set_class(lock, name, key, subclass, ip))
3198        check_chain_key(current);
3199    current->lockdep_recursion = 0;
3200    raw_local_irq_restore(flags);
3201}
3202EXPORT_SYMBOL_GPL(lock_set_class);
3203
3204/*
3205 * We are not always called with irqs disabled - do that here,
3206 * and also avoid lockdep recursion:
3207 */
3208void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3209              int trylock, int read, int check,
3210              struct lockdep_map *nest_lock, unsigned long ip)
3211{
3212    unsigned long flags;
3213
3214    if (unlikely(current->lockdep_recursion))
3215        return;
3216
3217    raw_local_irq_save(flags);
3218    check_flags(flags);
3219
3220    current->lockdep_recursion = 1;
3221    trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
3222    __lock_acquire(lock, subclass, trylock, read, check,
3223               irqs_disabled_flags(flags), nest_lock, ip, 0);
3224    current->lockdep_recursion = 0;
3225    raw_local_irq_restore(flags);
3226}
3227EXPORT_SYMBOL_GPL(lock_acquire);
3228
3229void lock_release(struct lockdep_map *lock, int nested,
3230              unsigned long ip)
3231{
3232    unsigned long flags;
3233
3234    if (unlikely(current->lockdep_recursion))
3235        return;
3236
3237    raw_local_irq_save(flags);
3238    check_flags(flags);
3239    current->lockdep_recursion = 1;
3240    trace_lock_release(lock, ip);
3241    __lock_release(lock, nested, ip);
3242    current->lockdep_recursion = 0;
3243    raw_local_irq_restore(flags);
3244}
3245EXPORT_SYMBOL_GPL(lock_release);
3246
3247int lock_is_held(struct lockdep_map *lock)
3248{
3249    unsigned long flags;
3250    int ret = 0;
3251
3252    if (unlikely(current->lockdep_recursion))
3253        return ret;
3254
3255    raw_local_irq_save(flags);
3256    check_flags(flags);
3257
3258    current->lockdep_recursion = 1;
3259    ret = __lock_is_held(lock);
3260    current->lockdep_recursion = 0;
3261    raw_local_irq_restore(flags);
3262
3263    return ret;
3264}
3265EXPORT_SYMBOL_GPL(lock_is_held);
3266
3267void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
3268{
3269    current->lockdep_reclaim_gfp = gfp_mask;
3270}
3271
3272void lockdep_clear_current_reclaim_state(void)
3273{
3274    current->lockdep_reclaim_gfp = 0;
3275}
3276
3277#ifdef CONFIG_LOCK_STAT
3278static int
3279print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
3280               unsigned long ip)
3281{
3282    if (!debug_locks_off())
3283        return 0;
3284    if (debug_locks_silent)
3285        return 0;
3286
3287    printk("\n=================================\n");
3288    printk( "[ BUG: bad contention detected! ]\n");
3289    printk( "---------------------------------\n");
3290    printk("%s/%d is trying to contend lock (",
3291        curr->comm, task_pid_nr(curr));
3292    print_lockdep_cache(lock);
3293    printk(") at:\n");
3294    print_ip_sym(ip);
3295    printk("but there are no locks held!\n");
3296    printk("\nother info that might help us debug this:\n");
3297    lockdep_print_held_locks(curr);
3298
3299    printk("\nstack backtrace:\n");
3300    dump_stack();
3301
3302    return 0;
3303}
3304
3305static void
3306__lock_contended(struct lockdep_map *lock, unsigned long ip)
3307{
3308    struct task_struct *curr = current;
3309    struct held_lock *hlock, *prev_hlock;
3310    struct lock_class_stats *stats;
3311    unsigned int depth;
3312    int i, contention_point, contending_point;
3313
3314    depth = curr->lockdep_depth;
3315    if (DEBUG_LOCKS_WARN_ON(!depth))
3316        return;
3317
3318    prev_hlock = NULL;
3319    for (i = depth-1; i >= 0; i--) {
3320        hlock = curr->held_locks + i;
3321        /*
3322         * We must not cross into another context:
3323         */
3324        if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3325            break;
3326        if (match_held_lock(hlock, lock))
3327            goto found_it;
3328        prev_hlock = hlock;
3329    }
3330    print_lock_contention_bug(curr, lock, ip);
3331    return;
3332
3333found_it:
3334    if (hlock->instance != lock)
3335        return;
3336
3337    hlock->waittime_stamp = lockstat_clock();
3338
3339    contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3340    contending_point = lock_point(hlock_class(hlock)->contending_point,
3341                      lock->ip);
3342
3343    stats = get_lock_stats(hlock_class(hlock));
3344    if (contention_point < LOCKSTAT_POINTS)
3345        stats->contention_point[contention_point]++;
3346    if (contending_point < LOCKSTAT_POINTS)
3347        stats->contending_point[contending_point]++;
3348    if (lock->cpu != smp_processor_id())
3349        stats->bounces[bounce_contended + !!hlock->read]++;
3350    put_lock_stats(stats);
3351}
3352
3353static void
3354__lock_acquired(struct lockdep_map *lock, unsigned long ip)
3355{
3356    struct task_struct *curr = current;
3357    struct held_lock *hlock, *prev_hlock;
3358    struct lock_class_stats *stats;
3359    unsigned int depth;
3360    u64 now, waittime = 0;
3361    int i, cpu;
3362
3363    depth = curr->lockdep_depth;
3364    if (DEBUG_LOCKS_WARN_ON(!depth))
3365        return;
3366
3367    prev_hlock = NULL;
3368    for (i = depth-1; i >= 0; i--) {
3369        hlock = curr->held_locks + i;
3370        /*
3371         * We must not cross into another context:
3372         */
3373        if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3374            break;
3375        if (match_held_lock(hlock, lock))
3376            goto found_it;
3377        prev_hlock = hlock;
3378    }
3379    print_lock_contention_bug(curr, lock, _RET_IP_);
3380    return;
3381
3382found_it:
3383    if (hlock->instance != lock)
3384        return;
3385
3386    cpu = smp_processor_id();
3387    if (hlock->waittime_stamp) {
3388        now = lockstat_clock();
3389        waittime = now - hlock->waittime_stamp;
3390        hlock->holdtime_stamp = now;
3391    }
3392
3393    trace_lock_acquired(lock, ip);
3394
3395    stats = get_lock_stats(hlock_class(hlock));
3396    if (waittime) {
3397        if (hlock->read)
3398            lock_time_inc(&stats->read_waittime, waittime);
3399        else
3400            lock_time_inc(&stats->write_waittime, waittime);
3401    }
3402    if (lock->cpu != cpu)
3403        stats->bounces[bounce_acquired + !!hlock->read]++;
3404    put_lock_stats(stats);
3405
3406    lock->cpu = cpu;
3407    lock->ip = ip;
3408}
3409
3410void lock_contended(struct lockdep_map *lock, unsigned long ip)
3411{
3412    unsigned long flags;
3413
3414    if (unlikely(!lock_stat))
3415        return;
3416
3417    if (unlikely(current->lockdep_recursion))
3418        return;
3419
3420    raw_local_irq_save(flags);
3421    check_flags(flags);
3422    current->lockdep_recursion = 1;
3423    trace_lock_contended(lock, ip);
3424    __lock_contended(lock, ip);
3425    current->lockdep_recursion = 0;
3426    raw_local_irq_restore(flags);
3427}
3428EXPORT_SYMBOL_GPL(lock_contended);
3429
3430void lock_acquired(struct lockdep_map *lock, unsigned long ip)
3431{
3432    unsigned long flags;
3433
3434    if (unlikely(!lock_stat))
3435        return;
3436
3437    if (unlikely(current->lockdep_recursion))
3438        return;
3439
3440    raw_local_irq_save(flags);
3441    check_flags(flags);
3442    current->lockdep_recursion = 1;
3443    __lock_acquired(lock, ip);
3444    current->lockdep_recursion = 0;
3445    raw_local_irq_restore(flags);
3446}
3447EXPORT_SYMBOL_GPL(lock_acquired);
3448#endif
3449
3450/*
3451 * Used by the testsuite, sanitize the validator state
3452 * after a simulated failure:
3453 */
3454
3455void lockdep_reset(void)
3456{
3457    unsigned long flags;
3458    int i;
3459
3460    raw_local_irq_save(flags);
3461    current->curr_chain_key = 0;
3462    current->lockdep_depth = 0;
3463    current->lockdep_recursion = 0;
3464    memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
3465    nr_hardirq_chains = 0;
3466    nr_softirq_chains = 0;
3467    nr_process_chains = 0;
3468    debug_locks = 1;
3469    for (i = 0; i < CHAINHASH_SIZE; i++)
3470        INIT_LIST_HEAD(chainhash_table + i);
3471    raw_local_irq_restore(flags);
3472}
3473
3474static void zap_class(struct lock_class *class)
3475{
3476    int i;
3477
3478    /*
3479     * Remove all dependencies this lock is
3480     * involved in:
3481     */
3482    for (i = 0; i < nr_list_entries; i++) {
3483        if (list_entries[i].class == class)
3484            list_del_rcu(&list_entries[i].entry);
3485    }
3486    /*
3487     * Unhash the class and remove it from the all_lock_classes list:
3488     */
3489    list_del_rcu(&class->hash_entry);
3490    list_del_rcu(&class->lock_entry);
3491
3492    class->key = NULL;
3493}
3494
3495static inline int within(const void *addr, void *start, unsigned long size)
3496{
3497    return addr >= start && addr < start + size;
3498}
3499
3500void lockdep_free_key_range(void *start, unsigned long size)
3501{
3502    struct lock_class *class, *next;
3503    struct list_head *head;
3504    unsigned long flags;
3505    int i;
3506    int locked;
3507
3508    raw_local_irq_save(flags);
3509    locked = graph_lock();
3510
3511    /*
3512     * Unhash all classes that were created by this module:
3513     */
3514    for (i = 0; i < CLASSHASH_SIZE; i++) {
3515        head = classhash_table + i;
3516        if (list_empty(head))
3517            continue;
3518        list_for_each_entry_safe(class, next, head, hash_entry) {
3519            if (within(class->key, start, size))
3520                zap_class(class);
3521            else if (within(class->name, start, size))
3522                zap_class(class);
3523        }
3524    }
3525
3526    if (locked)
3527        graph_unlock();
3528    raw_local_irq_restore(flags);
3529}
3530
3531void lockdep_reset_lock(struct lockdep_map *lock)
3532{
3533    struct lock_class *class, *next;
3534    struct list_head *head;
3535    unsigned long flags;
3536    int i, j;
3537    int locked;
3538
3539    raw_local_irq_save(flags);
3540
3541    /*
3542     * Remove all classes this lock might have:
3543     */
3544    for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
3545        /*
3546         * If the class exists we look it up and zap it:
3547         */
3548        class = look_up_lock_class(lock, j);
3549        if (class)
3550            zap_class(class);
3551    }
3552    /*
3553     * Debug check: in the end all mapped classes should
3554     * be gone.
3555     */
3556    locked = graph_lock();
3557    for (i = 0; i < CLASSHASH_SIZE; i++) {
3558        head = classhash_table + i;
3559        if (list_empty(head))
3560            continue;
3561        list_for_each_entry_safe(class, next, head, hash_entry) {
3562            if (unlikely(class == lock->class_cache)) {
3563                if (debug_locks_off_graph_unlock())
3564                    WARN_ON(1);
3565                goto out_restore;
3566            }
3567        }
3568    }
3569    if (locked)
3570        graph_unlock();
3571
3572out_restore:
3573    raw_local_irq_restore(flags);
3574}
3575
3576void lockdep_init(void)
3577{
3578    int i;
3579
3580    /*
3581     * Some architectures have their own start_kernel()
3582     * code which calls lockdep_init(), while we also
3583     * call lockdep_init() from the start_kernel() itself,
3584     * and we want to initialize the hashes only once:
3585     */
3586    if (lockdep_initialized)
3587        return;
3588
3589    for (i = 0; i < CLASSHASH_SIZE; i++)
3590        INIT_LIST_HEAD(classhash_table + i);
3591
3592    for (i = 0; i < CHAINHASH_SIZE; i++)
3593        INIT_LIST_HEAD(chainhash_table + i);
3594
3595    lockdep_initialized = 1;
3596}
3597
3598void __init lockdep_info(void)
3599{
3600    printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
3601
3602    printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
3603    printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
3604    printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
3605    printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
3606    printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
3607    printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
3608    printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
3609
3610    printk(" memory used by lock dependency info: %lu kB\n",
3611        (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
3612        sizeof(struct list_head) * CLASSHASH_SIZE +
3613        sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
3614        sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
3615        sizeof(struct list_head) * CHAINHASH_SIZE
3616#ifdef CONFIG_PROVE_LOCKING
3617        + sizeof(struct circular_queue)
3618#endif
3619        ) / 1024
3620        );
3621
3622    printk(" per task-struct memory footprint: %lu bytes\n",
3623        sizeof(struct held_lock) * MAX_LOCK_DEPTH);
3624
3625#ifdef CONFIG_DEBUG_LOCKDEP
3626    if (lockdep_init_error) {
3627        printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
3628        printk("Call stack leading to lockdep invocation was:\n");
3629        print_stack_trace(&lockdep_init_trace, 0);
3630    }
3631#endif
3632}
3633
3634static void
3635print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
3636             const void *mem_to, struct held_lock *hlock)
3637{
3638    if (!debug_locks_off())
3639        return;
3640    if (debug_locks_silent)
3641        return;
3642
3643    printk("\n=========================\n");
3644    printk( "[ BUG: held lock freed! ]\n");
3645    printk( "-------------------------\n");
3646    printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
3647        curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
3648    print_lock(hlock);
3649    lockdep_print_held_locks(curr);
3650
3651    printk("\nstack backtrace:\n");
3652    dump_stack();
3653}
3654
3655static inline int not_in_range(const void* mem_from, unsigned long mem_len,
3656                const void* lock_from, unsigned long lock_len)
3657{
3658    return lock_from + lock_len <= mem_from ||
3659        mem_from + mem_len <= lock_from;
3660}
3661
3662/*
3663 * Called when kernel memory is freed (or unmapped), or if a lock
3664 * is destroyed or reinitialized - this code checks whether there is
3665 * any held lock in the memory range of <from> to <to>:
3666 */
3667void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
3668{
3669    struct task_struct *curr = current;
3670    struct held_lock *hlock;
3671    unsigned long flags;
3672    int i;
3673
3674    if (unlikely(!debug_locks))
3675        return;
3676
3677    local_irq_save(flags);
3678    for (i = 0; i < curr->lockdep_depth; i++) {
3679        hlock = curr->held_locks + i;
3680
3681        if (not_in_range(mem_from, mem_len, hlock->instance,
3682                    sizeof(*hlock->instance)))
3683            continue;
3684
3685        print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
3686        break;
3687    }
3688    local_irq_restore(flags);
3689}
3690EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
3691
3692static void print_held_locks_bug(struct task_struct *curr)
3693{
3694    if (!debug_locks_off())
3695        return;
3696    if (debug_locks_silent)
3697        return;
3698
3699    printk("\n=====================================\n");
3700    printk( "[ BUG: lock held at task exit time! ]\n");
3701    printk( "-------------------------------------\n");
3702    printk("%s/%d is exiting with locks still held!\n",
3703        curr->comm, task_pid_nr(curr));
3704    lockdep_print_held_locks(curr);
3705
3706    printk("\nstack backtrace:\n");
3707    dump_stack();
3708}
3709
3710void debug_check_no_locks_held(struct task_struct *task)
3711{
3712    if (unlikely(task->lockdep_depth > 0))
3713        print_held_locks_bug(task);
3714}
3715
3716void debug_show_all_locks(void)
3717{
3718    struct task_struct *g, *p;
3719    int count = 10;
3720    int unlock = 1;
3721
3722    if (unlikely(!debug_locks)) {
3723        printk("INFO: lockdep is turned off.\n");
3724        return;
3725    }
3726    printk("\nShowing all locks held in the system:\n");
3727
3728    /*
3729     * Here we try to get the tasklist_lock as hard as possible,
3730     * if not successful after 2 seconds we ignore it (but keep
3731     * trying). This is to enable a debug printout even if a
3732     * tasklist_lock-holding task deadlocks or crashes.
3733     */
3734retry:
3735    if (!read_trylock(&tasklist_lock)) {
3736        if (count == 10)
3737            printk("hm, tasklist_lock locked, retrying... ");
3738        if (count) {
3739            count--;
3740            printk(" #%d", 10-count);
3741            mdelay(200);
3742            goto retry;
3743        }
3744        printk(" ignoring it.\n");
3745        unlock = 0;
3746    } else {
3747        if (count != 10)
3748            printk(KERN_CONT " locked it.\n");
3749    }
3750
3751    do_each_thread(g, p) {
3752        /*
3753         * It's not reliable to print a task's held locks
3754         * if it's not sleeping (or if it's not the current
3755         * task):
3756         */
3757        if (p->state == TASK_RUNNING && p != current)
3758            continue;
3759        if (p->lockdep_depth)
3760            lockdep_print_held_locks(p);
3761        if (!unlock)
3762            if (read_trylock(&tasklist_lock))
3763                unlock = 1;
3764    } while_each_thread(g, p);
3765
3766    printk("\n");
3767    printk("=============================================\n\n");
3768
3769    if (unlock)
3770        read_unlock(&tasklist_lock);
3771}
3772EXPORT_SYMBOL_GPL(debug_show_all_locks);
3773
3774/*
3775 * Careful: only use this function if you are sure that
3776 * the task cannot run in parallel!
3777 */
3778void __debug_show_held_locks(struct task_struct *task)
3779{
3780    if (unlikely(!debug_locks)) {
3781        printk("INFO: lockdep is turned off.\n");
3782        return;
3783    }
3784    lockdep_print_held_locks(task);
3785}
3786EXPORT_SYMBOL_GPL(__debug_show_held_locks);
3787
3788void debug_show_held_locks(struct task_struct *task)
3789{
3790        __debug_show_held_locks(task);
3791}
3792EXPORT_SYMBOL_GPL(debug_show_held_locks);
3793
3794void lockdep_sys_exit(void)
3795{
3796    struct task_struct *curr = current;
3797
3798    if (unlikely(curr->lockdep_depth)) {
3799        if (!debug_locks_off())
3800            return;
3801        printk("\n================================================\n");
3802        printk( "[ BUG: lock held when returning to user space! ]\n");
3803        printk( "------------------------------------------------\n");
3804        printk("%s/%d is leaving the kernel with locks still held!\n",
3805                curr->comm, curr->pid);
3806        lockdep_print_held_locks(curr);
3807    }
3808}
3809
3810void lockdep_rcu_dereference(const char *file, const int line)
3811{
3812    struct task_struct *curr = current;
3813
3814#ifndef CONFIG_PROVE_RCU_REPEATEDLY
3815    if (!debug_locks_off())
3816        return;
3817#endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */
3818    /* Note: the following can be executed concurrently, so be careful. */
3819    printk("\n===================================================\n");
3820    printk( "[ INFO: suspicious rcu_dereference_check() usage. ]\n");
3821    printk( "---------------------------------------------------\n");
3822    printk("%s:%d invoked rcu_dereference_check() without protection!\n",
3823            file, line);
3824    printk("\nother info that might help us debug this:\n\n");
3825    printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks);
3826    lockdep_print_held_locks(curr);
3827    printk("\nstack backtrace:\n");
3828    dump_stack();
3829}
3830EXPORT_SYMBOL_GPL(lockdep_rcu_dereference);
3831

Archive Download this file



interactive