1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * kernel/lockdep.c
4 *
5 * Runtime locking correctness validator
6 *
7 * Started by Ingo Molnar:
8 *
9 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
10 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
11 *
12 * this code maps all the lock dependencies as they occur in a live kernel
13 * and will warn about the following classes of locking bugs:
14 *
15 * - lock inversion scenarios
16 * - circular lock dependencies
17 * - hardirq/softirq safe/unsafe locking bugs
18 *
19 * Bugs are reported even if the current locking scenario does not cause
20 * any deadlock at this point.
21 *
22 * I.e. if at any time in the past two locks were taken in a different order,
23 * even if it happened in another task, and even if those were different
24 * lock instances (but of the same class as this lock), this code will detect it.
25 *
26 * Thanks to Arjan van de Ven for coming up with the initial idea of
27 * mapping lock dependencies at runtime.
28 */
29#define DISABLE_BRANCH_PROFILING
30#include <linux/mutex.h>
31#include <linux/sched.h>
32#include <linux/sched/clock.h>
33#include <linux/sched/task.h>
34#include <linux/sched/mm.h>
35#include <linux/delay.h>
36#include <linux/module.h>
37#include <linux/proc_fs.h>
38#include <linux/seq_file.h>
39#include <linux/spinlock.h>
40#include <linux/kallsyms.h>
41#include <linux/interrupt.h>
42#include <linux/stacktrace.h>
43#include <linux/debug_locks.h>
44#include <linux/irqflags.h>
45#include <linux/utsname.h>
46#include <linux/hash.h>
47#include <linux/ftrace.h>
48#include <linux/stringify.h>
49#include <linux/bitmap.h>
50#include <linux/bitops.h>
51#include <linux/gfp.h>
52#include <linux/random.h>
53#include <linux/jhash.h>
54#include <linux/nmi.h>
55#include <linux/rcupdate.h>
56#include <linux/kprobes.h>
57
58#include <asm/sections.h>
59
60#include "lockdep_internals.h"
61
62#define CREATE_TRACE_POINTS
63#include <trace/events/lock.h>
64
65#ifdef CONFIG_PROVE_LOCKING
66int prove_locking = 1;
67module_param(prove_locking, int, 0644);
68#else
69#define prove_locking 0
70#endif
71
72#ifdef CONFIG_LOCK_STAT
73int lock_stat = 1;
74module_param(lock_stat, int, 0644);
75#else
76#define lock_stat 0
77#endif
78
79/*
80 * lockdep_lock: protects the lockdep graph, the hashes and the
81 * class/list/hash allocators.
82 *
83 * This is one of the rare exceptions where it's justified
84 * to use a raw spinlock - we really don't want the spinlock
85 * code to recurse back into the lockdep code...
86 */
87static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
88static struct task_struct *__owner;
89
90static inline void lockdep_lock(void)
91{
92 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
93
94 arch_spin_lock(&__lock);
95 __owner = current;
96 current->lockdep_recursion++;
97}
98
99static inline void lockdep_unlock(void)
100{
101 if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
102 return;
103
104 current->lockdep_recursion--;
105 __owner = NULL;
106 arch_spin_unlock(&__lock);
107}
108
109static inline bool lockdep_assert_locked(void)
110{
111 return DEBUG_LOCKS_WARN_ON(__owner != current);
112}
113
114static struct task_struct *lockdep_selftest_task_struct;
115
116
117static int graph_lock(void)
118{
119 lockdep_lock();
120 /*
121 * Make sure that if another CPU detected a bug while
122 * walking the graph we don't change it (while the other
123 * CPU is busy printing out stuff with the graph lock
124 * dropped already)
125 */
126 if (!debug_locks) {
127 lockdep_unlock();
128 return 0;
129 }
130 return 1;
131}
132
133static inline void graph_unlock(void)
134{
135 lockdep_unlock();
136}
137
138/*
139 * Turn lock debugging off and return 0 if it was already off,
140 * and also release the graph lock:
141 */
142static inline int debug_locks_off_graph_unlock(void)
143{
144 int ret = debug_locks_off();
145
146 lockdep_unlock();
147
148 return ret;
149}
150
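/*
 * Typical usage pattern for the helpers above, as an illustrative sketch only
 * (this is the shape register_lock_class() and friends use below): take the
 * graph lock, bail out if lock debugging has already been turned off, and
 * release the lock again when done.
 */
#if 0
	if (!graph_lock())
		return;		/* debug_locks was cleared concurrently */
	/* ... modify the dependency graph, the hashes or the allocators ... */
	graph_unlock();
#endif
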
151unsigned long nr_list_entries;
152static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
153static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
154
155/*
156 * All data structures here are protected by the global debug_lock.
157 *
158 * nr_lock_classes is the number of elements of lock_classes[] that is
159 * in use.
160 */
161#define KEYHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
162#define KEYHASH_SIZE (1UL << KEYHASH_BITS)
163static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
164unsigned long nr_lock_classes;
165#ifndef CONFIG_DEBUG_LOCKDEP
166static
167#endif
168struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
169static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
170
171static inline struct lock_class *hlock_class(struct held_lock *hlock)
172{
173 unsigned int class_idx = hlock->class_idx;
174
175 /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
176 barrier();
177
178 if (!test_bit(class_idx, lock_classes_in_use)) {
179 /*
180 * Someone passed in garbage, we give up.
181 */
182 DEBUG_LOCKS_WARN_ON(1);
183 return NULL;
184 }
185
186 /*
187 * At this point, if the passed hlock->class_idx is still garbage,
188 * we just have to live with it
189 */
190 return lock_classes + class_idx;
191}
192
193#ifdef CONFIG_LOCK_STAT
194static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
195
196static inline u64 lockstat_clock(void)
197{
198 return local_clock();
199}
200
201static int lock_point(unsigned long points[], unsigned long ip)
202{
203 int i;
204
205 for (i = 0; i < LOCKSTAT_POINTS; i++) {
206 if (points[i] == 0) {
207 points[i] = ip;
208 break;
209 }
210 if (points[i] == ip)
211 break;
212 }
213
214 return i;
215}
216
217static void lock_time_inc(struct lock_time *lt, u64 time)
218{
219 if (time > lt->max)
220 lt->max = time;
221
222 if (time < lt->min || !lt->nr)
223 lt->min = time;
224
225 lt->total += time;
226 lt->nr++;
227}
228
229static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
230{
231 if (!src->nr)
232 return;
233
234 if (src->max > dst->max)
235 dst->max = src->max;
236
237 if (src->min < dst->min || !dst->nr)
238 dst->min = src->min;
239
240 dst->total += src->total;
241 dst->nr += src->nr;
242}
243
244struct lock_class_stats lock_stats(struct lock_class *class)
245{
246 struct lock_class_stats stats;
247 int cpu, i;
248
249 memset(&stats, 0, sizeof(struct lock_class_stats));
250 for_each_possible_cpu(cpu) {
251 struct lock_class_stats *pcs =
252 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
253
254 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
255 stats.contention_point[i] += pcs->contention_point[i];
256
257 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
258 stats.contending_point[i] += pcs->contending_point[i];
259
260 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
261 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
262
263 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
264 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
265
266 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
267 stats.bounces[i] += pcs->bounces[i];
268 }
269
270 return stats;
271}
272
273void clear_lock_stats(struct lock_class *class)
274{
275 int cpu;
276
277 for_each_possible_cpu(cpu) {
278 struct lock_class_stats *cpu_stats =
279 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
280
281 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
282 }
283 memset(class->contention_point, 0, sizeof(class->contention_point));
284 memset(class->contending_point, 0, sizeof(class->contending_point));
285}
286
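/*
 * Note: lock_stats() above aggregates the per-CPU counters into a single
 * struct lock_class_stats; that aggregate is what the /proc/lock_stat
 * interface reports, and writing to that file resets the counters via
 * clear_lock_stats().
 */
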
287static struct lock_class_stats *get_lock_stats(struct lock_class *class)
288{
289 return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
290}
291
292static void lock_release_holdtime(struct held_lock *hlock)
293{
294 struct lock_class_stats *stats;
295 u64 holdtime;
296
297 if (!lock_stat)
298 return;
299
300 holdtime = lockstat_clock() - hlock->holdtime_stamp;
301
302 stats = get_lock_stats(hlock_class(hlock));
303 if (hlock->read)
304 lock_time_inc(&stats->read_holdtime, holdtime);
305 else
306 lock_time_inc(&stats->write_holdtime, holdtime);
307}
308#else
309static inline void lock_release_holdtime(struct held_lock *hlock)
310{
311}
312#endif
313
314/*
315 * We keep a global list of all lock classes. The list is only accessed with
316 * the lockdep spinlock held. free_lock_classes is a list with free
317 * elements. These elements are linked together by the lock_entry member in
318 * struct lock_class.
319 */
320LIST_HEAD(all_lock_classes);
321static LIST_HEAD(free_lock_classes);
322
323/**
324 * struct pending_free - information about data structures about to be freed
325 * @zapped: Head of a list with struct lock_class elements.
326 * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
327 * are about to be freed.
328 */
329struct pending_free {
330 struct list_head zapped;
331 DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
332};
333
334/**
335 * struct delayed_free - data structures used for delayed freeing
336 *
337 * A data structure for delayed freeing of data structures that may be
338 * accessed by RCU readers at the time these were freed.
339 *
340 * @rcu_head: Used to schedule an RCU callback for freeing data structures.
341 * @index: Index of @pf to which freed data structures are added.
342 * @scheduled: Whether or not an RCU callback has been scheduled.
343 * @pf: Array with information about data structures about to be freed.
344 */
345static struct delayed_free {
346 struct rcu_head rcu_head;
347 int index;
348 int scheduled;
349 struct pending_free pf[2];
350} delayed_free;
351
352/*
353 * The lockdep classes are in a hash-table as well, for fast lookup:
354 */
355#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
356#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
357#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
358#define classhashentry(key) (classhash_table + __classhashfn((key)))
359
360static struct hlist_head classhash_table[CLASSHASH_SIZE];
361
362/*
363 * We put the lock dependency chains into a hash-table as well, to cache
364 * their existence:
365 */
366#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
367#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
368#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
369#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
370
371static struct hlist_head chainhash_table[CHAINHASH_SIZE];
372
373/*
374 * The hash key of the lock dependency chains is a hash itself too:
375 * it's a hash of all locks taken up to that lock, including that lock.
376 * It's a 64-bit hash, because it's important for the keys to be
377 * unique.
378 */
379static inline u64 iterate_chain_key(u64 key, u32 idx)
380{
381 u32 k0 = key, k1 = key >> 32;
382
383 __jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
384
385 return k0 | (u64)k1 << 32;
386}
387
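/*
 * Illustrative sketch: the chain key for a sequence of held locks is built
 * by folding each class index into the running 64-bit key, starting from
 * INITIAL_CHAIN_KEY (class_idx_A/class_idx_B below are hypothetical class
 * indices). Two tasks acquiring the same classes in the same order end up
 * with the same chain key and therefore hit the cached lock chain.
 */
#if 0
	u64 chain_key = INITIAL_CHAIN_KEY;

	chain_key = iterate_chain_key(chain_key, class_idx_A);
	chain_key = iterate_chain_key(chain_key, class_idx_B);
	/* chain_key now identifies the acquisition sequence A -> B */
#endif
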
388void lockdep_init_task(struct task_struct *task)
389{
390 task->lockdep_depth = 0; /* no locks held yet */
391 task->curr_chain_key = INITIAL_CHAIN_KEY;
392 task->lockdep_recursion = 0;
393}
394
395void lockdep_off(void)
396{
397 current->lockdep_recursion++;
398}
399EXPORT_SYMBOL(lockdep_off);
400
401void lockdep_on(void)
402{
403 current->lockdep_recursion--;
404}
405EXPORT_SYMBOL(lockdep_on);
406
407static inline void lockdep_recursion_finish(void)
408{
409 if (WARN_ON_ONCE(--current->lockdep_recursion))
410 current->lockdep_recursion = 0;
411}
412
413void lockdep_set_selftest_task(struct task_struct *task)
414{
415 lockdep_selftest_task_struct = task;
416}
417
418/*
419 * Debugging switches:
420 */
421
422#define VERBOSE 0
423#define VERY_VERBOSE 0
424
425#if VERBOSE
426# define HARDIRQ_VERBOSE 1
427# define SOFTIRQ_VERBOSE 1
428#else
429# define HARDIRQ_VERBOSE 0
430# define SOFTIRQ_VERBOSE 0
431#endif
432
433#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
434/*
435 * Quick filtering for interesting events:
436 */
437static int class_filter(struct lock_class *class)
438{
439#if 0
440 /* Example */
441 if (class->name_version == 1 &&
442 !strcmp(class->name, "lockname"))
443 return 1;
444 if (class->name_version == 1 &&
445 !strcmp(class->name, "&struct->lockfield"))
446 return 1;
447#endif
448 /* Filter everything else. Return 1 here to allow everything else. */
449 return 0;
450}
451#endif
452
453static int verbose(struct lock_class *class)
454{
455#if VERBOSE
456 return class_filter(class);
457#endif
458 return 0;
459}
460
461static void print_lockdep_off(const char *bug_msg)
462{
463 printk(KERN_DEBUG "%s\n", bug_msg);
464 printk(KERN_DEBUG "turning off the locking correctness validator.\n");
465#ifdef CONFIG_LOCK_STAT
466 printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
467#endif
468}
469
470unsigned long nr_stack_trace_entries;
471
472#ifdef CONFIG_PROVE_LOCKING
473/**
474 * struct lock_trace - single stack backtrace
475 * @hash_entry: Entry in a stack_trace_hash[] list.
476 * @hash: jhash() of @entries.
477 * @nr_entries: Number of entries in @entries.
478 * @entries: Actual stack backtrace.
479 */
480struct lock_trace {
481 struct hlist_node hash_entry;
482 u32 hash;
483 u32 nr_entries;
484 unsigned long entries[0] __aligned(sizeof(unsigned long));
485};
486#define LOCK_TRACE_SIZE_IN_LONGS \
487 (sizeof(struct lock_trace) / sizeof(unsigned long))
488/*
489 * Stack-trace: sequence of lock_trace structures. Protected by the graph_lock.
490 */
491static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
492static struct hlist_head stack_trace_hash[STACK_TRACE_HASH_SIZE];
493
494static bool traces_identical(struct lock_trace *t1, struct lock_trace *t2)
495{
496 return t1->hash == t2->hash && t1->nr_entries == t2->nr_entries &&
497 memcmp(t1->entries, t2->entries,
498 t1->nr_entries * sizeof(t1->entries[0])) == 0;
499}
500
501static struct lock_trace *save_trace(void)
502{
503 struct lock_trace *trace, *t2;
504 struct hlist_head *hash_head;
505 u32 hash;
506 int max_entries;
507
508 BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
509 BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);
510
511 trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
512 max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
513 LOCK_TRACE_SIZE_IN_LONGS;
514
515 if (max_entries <= 0) {
516 if (!debug_locks_off_graph_unlock())
517 return NULL;
518
519 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
520 dump_stack();
521
522 return NULL;
523 }
524 trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);
525
526 hash = jhash(trace->entries, trace->nr_entries *
527 sizeof(trace->entries[0]), 0);
528 trace->hash = hash;
529 hash_head = stack_trace_hash + (hash & (STACK_TRACE_HASH_SIZE - 1));
530 hlist_for_each_entry(t2, hash_head, hash_entry) {
531 if (traces_identical(trace, t2))
532 return t2;
533 }
534 nr_stack_trace_entries += LOCK_TRACE_SIZE_IN_LONGS + trace->nr_entries;
535 hlist_add_head(&trace->hash_entry, hash_head);
536
537 return trace;
538}
539
540/* Return the number of stack traces in the stack_trace[] array. */
541u64 lockdep_stack_trace_count(void)
542{
543 struct lock_trace *trace;
544 u64 c = 0;
545 int i;
546
547 for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++) {
548 hlist_for_each_entry(trace, &stack_trace_hash[i], hash_entry) {
549 c++;
550 }
551 }
552
553 return c;
554}
555
556/* Return the number of stack hash chains that have at least one stack trace. */
557u64 lockdep_stack_hash_count(void)
558{
559 u64 c = 0;
560 int i;
561
562 for (i = 0; i < ARRAY_SIZE(stack_trace_hash); i++)
563 if (!hlist_empty(&stack_trace_hash[i]))
564 c++;
565
566 return c;
567}
568#endif
569
570unsigned int nr_hardirq_chains;
571unsigned int nr_softirq_chains;
572unsigned int nr_process_chains;
573unsigned int max_lockdep_depth;
574
575#ifdef CONFIG_DEBUG_LOCKDEP
576/*
577 * Various lockdep statistics:
578 */
579DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
580#endif
581
582#ifdef CONFIG_PROVE_LOCKING
583/*
584 * Locking printouts:
585 */
586
587#define __USAGE(__STATE) \
588 [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
589 [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
590 [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
591 [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
592
593static const char *usage_str[] =
594{
595#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
596#include "lockdep_states.h"
597#undef LOCKDEP_STATE
598 [LOCK_USED] = "INITIAL USE",
599};
600#endif
601
602const char *__get_key_name(const struct lockdep_subclass_key *key, char *str)
603{
604 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
605}
606
607static inline unsigned long lock_flag(enum lock_usage_bit bit)
608{
609 return 1UL << bit;
610}
611
612static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
613{
614 /*
615 * The usage character defaults to '.' (i.e., irqs disabled and not in
616 * irq context), which is the safest usage category.
617 */
618 char c = '.';
619
620 /*
621 * The order of the following usage checks matters, which will
622 * result in the outcome character as follows:
623 *
624 * - '+': irq is enabled and not in irq context
625 * - '-': in irq context and irq is disabled
626 * - '?': in irq context and irq is enabled
627 */
628 if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {
629 c = '+';
630 if (class->usage_mask & lock_flag(bit))
631 c = '?';
632 } else if (class->usage_mask & lock_flag(bit))
633 c = '-';
634
635 return c;
636}
637
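/*
 * Illustrative reading of the character returned above for one state
 * (e.g. HARDIRQ):
 *
 *	'.'  the lock was never acquired in hardirq context and never with
 *	     hardirqs enabled
 *	'-'  the lock was acquired in hardirq context
 *	'+'  the lock was acquired with hardirqs enabled
 *	'?'  both of the above, i.e. the potentially deadlocking combination
 *	     that the irq-state tracking flags
 *
 * A report line such as "(&dev->lock){?.+.}" is decoded with these
 * characters, two per state, in the order produced by get_usage_chars()
 * below (the lock name here is made up).
 */
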
638void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
639{
640 int i = 0;
641
642#define LOCKDEP_STATE(__STATE) \
643 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
644 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
645#include "lockdep_states.h"
646#undef LOCKDEP_STATE
647
648 usage[i] = '\0';
649}
650
651static void __print_lock_name(struct lock_class *class)
652{
653 char str[KSYM_NAME_LEN];
654 const char *name;
655
656 name = class->name;
657 if (!name) {
658 name = __get_key_name(class->key, str);
659 printk(KERN_CONT "%s", name);
660 } else {
661 printk(KERN_CONT "%s", name);
662 if (class->name_version > 1)
663 printk(KERN_CONT "#%d", class->name_version);
664 if (class->subclass)
665 printk(KERN_CONT "/%d", class->subclass);
666 }
667}
668
669static void print_lock_name(struct lock_class *class)
670{
671 char usage[LOCK_USAGE_CHARS];
672
673 get_usage_chars(class, usage);
674
675 printk(KERN_CONT " (");
676 __print_lock_name(class);
677 printk(KERN_CONT "){%s}", usage);
678}
679
680static void print_lockdep_cache(struct lockdep_map *lock)
681{
682 const char *name;
683 char str[KSYM_NAME_LEN];
684
685 name = lock->name;
686 if (!name)
687 name = __get_key_name(lock->key->subkeys, str);
688
689 printk(KERN_CONT "%s", name);
690}
691
692static void print_lock(struct held_lock *hlock)
693{
694 /*
695 * We can be called locklessly through debug_show_all_locks() so be
696 * extra careful, the hlock might have been released and cleared.
697 *
698 * If this indeed happens, let's pretend it does not hurt to continue
699 * to print the lock unless the hlock class_idx does not point to a
700 * registered class. The rationale here is: since we don't attempt
701 * to distinguish whether we are in this situation, if it just
702 * happened we can't count on class_idx to tell either.
703 */
704 struct lock_class *lock = hlock_class(hlock);
705
706 if (!lock) {
707 printk(KERN_CONT "<RELEASED>\n");
708 return;
709 }
710
711 printk(KERN_CONT "%px", hlock->instance);
712 print_lock_name(lock);
713 printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
714}
715
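/*
 * Illustrative sketch of the line print_lock() produces (the address,
 * lock name and symbol below are made up):
 *
 *	ffff8881234a5678 (&dev->lock){+.+.}, at: foo_probe+0x42/0x1b0
 */
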
716static void lockdep_print_held_locks(struct task_struct *p)
717{
718 int i, depth = READ_ONCE(p->lockdep_depth);
719
720 if (!depth)
721 printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
722 else
723 printk("%d lock%s held by %s/%d:\n", depth,
724 depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
725 /*
726 * It's not reliable to print a task's held locks if it's not sleeping
727 * and it's not the current task.
728 */
729 if (p->state == TASK_RUNNING && p != current)
730 return;
731 for (i = 0; i < depth; i++) {
732 printk(" #%d: ", i);
733 print_lock(p->held_locks + i);
734 }
735}
736
737static void print_kernel_ident(void)
738{
739 printk("%s %.*s %s\n", init_utsname()->release,
740 (int)strcspn(init_utsname()->version, " "),
741 init_utsname()->version,
742 print_tainted());
743}
744
745static int very_verbose(struct lock_class *class)
746{
747#if VERY_VERBOSE
748 return class_filter(class);
749#endif
750 return 0;
751}
752
753/*
754 * Is this the address of a static object:
755 */
756#ifdef __KERNEL__
757static int static_obj(const void *obj)
758{
759 unsigned long start = (unsigned long) &_stext,
760 end = (unsigned long) &_end,
761 addr = (unsigned long) obj;
762
763 if (arch_is_kernel_initmem_freed(addr))
764 return 0;
765
766 /*
767 * static variable?
768 */
769 if ((addr >= start) && (addr < end))
770 return 1;
771
772 if (arch_is_kernel_data(addr))
773 return 1;
774
775 /*
776 * in-kernel percpu var?
777 */
778 if (is_kernel_percpu_address(addr))
779 return 1;
780
781 /*
782 * module static or percpu var?
783 */
784 return is_module_address(addr) || is_module_percpu_address(addr);
785}
786#endif
787
788/*
789 * To make lock name printouts unique, we calculate a unique
790 * class->name_version generation counter. The caller must hold the graph
791 * lock.
792 */
793static int count_matching_names(struct lock_class *new_class)
794{
795 struct lock_class *class;
796 int count = 0;
797
798 if (!new_class->name)
799 return 0;
800
801 list_for_each_entry(class, &all_lock_classes, lock_entry) {
802 if (new_class->key - new_class->subclass == class->key)
803 return class->name_version;
804 if (class->name && !strcmp(class->name, new_class->name))
805 count = max(count, class->name_version);
806 }
807
808 return count + 1;
809}
810
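/*
 * Illustrative sketch: if two distinct classes happen to share the textual
 * name "&dev->lock" (a hypothetical example), they get name_version 1 and 2
 * and __print_lock_name() prints the second one as "&dev->lock#2", keeping
 * reports unambiguous.
 */
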
811static inline struct lock_class *
812look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
813{
814 struct lockdep_subclass_key *key;
815 struct hlist_head *hash_head;
816 struct lock_class *class;
817
818 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
819 debug_locks_off();
820 printk(KERN_ERR
821 "BUG: looking up invalid subclass: %u\n", subclass);
822 printk(KERN_ERR
823 "turning off the locking correctness validator.\n");
824 dump_stack();
825 return NULL;
826 }
827
828 /*
829 * If it is not initialised then it has never been locked,
830 * so it won't be present in the hash table.
831 */
832 if (unlikely(!lock->key))
833 return NULL;
834
835 /*
836 * NOTE: the class-key must be unique. For dynamic locks, a static
837 * lock_class_key variable is passed in through the mutex_init()
838 * (or spin_lock_init()) call - which acts as the key. For static
839 * locks we use the lock object itself as the key.
840 */
841 BUILD_BUG_ON(sizeof(struct lock_class_key) >
842 sizeof(struct lockdep_map));
843
844 key = lock->key->subkeys + subclass;
845
846 hash_head = classhashentry(key);
847
848 /*
849 * We do an RCU walk of the hash, see lockdep_free_key_range().
850 */
851 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
852 return NULL;
853
854 hlist_for_each_entry_rcu_notrace(class, hash_head, hash_entry) {
855 if (class->key == key) {
856 /*
857 * Huh! same key, different name? Did someone trample
858 * on some memory? We're most confused.
859 */
860 WARN_ON_ONCE(class->name != lock->name &&
861 lock->key != &__lockdep_no_validate__);
862 return class;
863 }
864 }
865
866 return NULL;
867}
868
869/*
870 * Static locks do not have their class-keys yet - for them the key is
871 * the lock object itself. If the lock is in the per cpu area, the
872 * canonical address of the lock (per cpu offset removed) is used.
873 */
874static bool assign_lock_key(struct lockdep_map *lock)
875{
876 unsigned long can_addr, addr = (unsigned long)lock;
877
878#ifdef __KERNEL__
879 /*
880 * lockdep_free_key_range() assumes that struct lock_class_key
881 * objects do not overlap. Since we use the address of lock
882 * objects as class key for static objects, check whether the
883 * size of lock_class_key objects does not exceed the size of
884 * the smallest lock object.
885 */
886 BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
887#endif
888
889 if (__is_kernel_percpu_address(addr, &can_addr))
890 lock->key = (void *)can_addr;
891 else if (__is_module_percpu_address(addr, &can_addr))
892 lock->key = (void *)can_addr;
893 else if (static_obj(lock))
894 lock->key = (void *)lock;
895 else {
896 /* Debug-check: all keys must be persistent! */
897 debug_locks_off();
898 pr_err("INFO: trying to register non-static key.\n");
899 pr_err("The code is fine but needs lockdep annotation, or maybe\n");
900 pr_err("you didn't initialize this object before use?\n");
901 pr_err("turning off the locking correctness validator.\n");
902 dump_stack();
903 return false;
904 }
905
906 return true;
907}
908
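/*
 * Illustrative sketch of the two key flavours handled above: a statically
 * defined lock can use its own address as the class key, while a
 * dynamically allocated one relies on the static lock_class_key that the
 * *_init() helper declares at its call site (struct foo and foo_alloc()
 * are hypothetical).
 */
#if 0
static DEFINE_SPINLOCK(global_lock);	/* static: its own address is the key */

struct foo {
	spinlock_t lock;
};

static struct foo *foo_alloc(void)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (foo)
		spin_lock_init(&foo->lock);	/* key: static __key at this site */
	return foo;
}
#endif
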
909#ifdef CONFIG_DEBUG_LOCKDEP
910
911/* Check whether element @e occurs in list @h */
912static bool in_list(struct list_head *e, struct list_head *h)
913{
914 struct list_head *f;
915
916 list_for_each(f, h) {
917 if (e == f)
918 return true;
919 }
920
921 return false;
922}
923
924/*
925 * Check whether entry @e occurs in any of the locks_after or locks_before
926 * lists.
927 */
928static bool in_any_class_list(struct list_head *e)
929{
930 struct lock_class *class;
931 int i;
932
933 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
934 class = &lock_classes[i];
935 if (in_list(e, &class->locks_after) ||
936 in_list(e, &class->locks_before))
937 return true;
938 }
939 return false;
940}
941
942static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
943{
944 struct lock_list *e;
945
946 list_for_each_entry(e, h, entry) {
947 if (e->links_to != c) {
948 printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
949 c->name ? : "(?)",
950 (unsigned long)(e - list_entries),
951 e->links_to && e->links_to->name ?
952 e->links_to->name : "(?)",
953 e->class && e->class->name ? e->class->name :
954 "(?)");
955 return false;
956 }
957 }
958 return true;
959}
960
961#ifdef CONFIG_PROVE_LOCKING
962static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
963#endif
964
965static bool check_lock_chain_key(struct lock_chain *chain)
966{
967#ifdef CONFIG_PROVE_LOCKING
968 u64 chain_key = INITIAL_CHAIN_KEY;
969 int i;
970
971 for (i = chain->base; i < chain->base + chain->depth; i++)
972 chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
973 /*
974 * The 'unsigned long long' casts avoid a compiler warning being
975 * reported when building tools/lib/lockdep.
976 */
977 if (chain->chain_key != chain_key) {
978 printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
979 (unsigned long long)(chain - lock_chains),
980 (unsigned long long)chain->chain_key,
981 (unsigned long long)chain_key);
982 return false;
983 }
984#endif
985 return true;
986}
987
988static bool in_any_zapped_class_list(struct lock_class *class)
989{
990 struct pending_free *pf;
991 int i;
992
993 for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
994 if (in_list(&class->lock_entry, &pf->zapped))
995 return true;
996 }
997
998 return false;
999}
1000
1001static bool __check_data_structures(void)
1002{
1003 struct lock_class *class;
1004 struct lock_chain *chain;
1005 struct hlist_head *head;
1006 struct lock_list *e;
1007 int i;
1008
1009 /* Check whether all classes occur in a lock list. */
1010 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1011 class = &lock_classes[i];
1012 if (!in_list(&class->lock_entry, &all_lock_classes) &&
1013 !in_list(&class->lock_entry, &free_lock_classes) &&
1014 !in_any_zapped_class_list(class)) {
1015 printk(KERN_INFO "class %px/%s is not in any class list\n",
1016 class, class->name ? : "(?)");
1017 return false;
1018 }
1019 }
1020
1021 /* Check whether all classes have valid lock lists. */
1022 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1023 class = &lock_classes[i];
1024 if (!class_lock_list_valid(class, &class->locks_before))
1025 return false;
1026 if (!class_lock_list_valid(class, &class->locks_after))
1027 return false;
1028 }
1029
1030 /* Check the chain_key of all lock chains. */
1031 for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
1032 head = chainhash_table + i;
1033 hlist_for_each_entry_rcu(chain, head, entry) {
1034 if (!check_lock_chain_key(chain))
1035 return false;
1036 }
1037 }
1038
1039 /*
1040 * Check whether all list entries that are in use occur in a class
1041 * lock list.
1042 */
1043 for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
1044 e = list_entries + i;
1045 if (!in_any_class_list(&e->entry)) {
1046 printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
1047 (unsigned int)(e - list_entries),
1048 e->class->name ? : "(?)",
1049 e->links_to->name ? : "(?)");
1050 return false;
1051 }
1052 }
1053
1054 /*
1055 * Check whether all list entries that are not in use do not occur in
1056 * a class lock list.
1057 */
1058 for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
1059 e = list_entries + i;
1060 if (in_any_class_list(&e->entry)) {
1061 printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
1062 (unsigned int)(e - list_entries),
1063 e->class && e->class->name ? e->class->name :
1064 "(?)",
1065 e->links_to && e->links_to->name ?
1066 e->links_to->name : "(?)");
1067 return false;
1068 }
1069 }
1070
1071 return true;
1072}
1073
1074int check_consistency = 0;
1075module_param(check_consistency, int, 0644);
1076
1077static void check_data_structures(void)
1078{
1079 static bool once = false;
1080
1081 if (check_consistency && !once) {
1082 if (!__check_data_structures()) {
1083 once = true;
1084 WARN_ON(once);
1085 }
1086 }
1087}
1088
1089#else /* CONFIG_DEBUG_LOCKDEP */
1090
1091static inline void check_data_structures(void) { }
1092
1093#endif /* CONFIG_DEBUG_LOCKDEP */
1094
1095/*
1096 * Initialize the lock_classes[] array elements, the free_lock_classes list
1097 * and also the delayed_free structure.
1098 */
1099static void init_data_structures_once(void)
1100{
1101 static bool ds_initialized, rcu_head_initialized;
1102 int i;
1103
1104 if (likely(rcu_head_initialized))
1105 return;
1106
1107 if (system_state >= SYSTEM_SCHEDULING) {
1108 init_rcu_head(&delayed_free.rcu_head);
1109 rcu_head_initialized = true;
1110 }
1111
1112 if (ds_initialized)
1113 return;
1114
1115 ds_initialized = true;
1116
1117 INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
1118 INIT_LIST_HEAD(&delayed_free.pf[1].zapped);
1119
1120 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1121 list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
1122 INIT_LIST_HEAD(&lock_classes[i].locks_after);
1123 INIT_LIST_HEAD(&lock_classes[i].locks_before);
1124 }
1125}
1126
1127static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
1128{
1129 unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
1130
1131 return lock_keys_hash + hash;
1132}
1133
1134/* Register a dynamically allocated key. */
1135void lockdep_register_key(struct lock_class_key *key)
1136{
1137 struct hlist_head *hash_head;
1138 struct lock_class_key *k;
1139 unsigned long flags;
1140
1141 if (WARN_ON_ONCE(static_obj(key)))
1142 return;
1143 hash_head = keyhashentry(key);
1144
1145 raw_local_irq_save(flags);
1146 if (!graph_lock())
1147 goto restore_irqs;
1148 hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1149 if (WARN_ON_ONCE(k == key))
1150 goto out_unlock;
1151 }
1152 hlist_add_head_rcu(&key->hash_entry, hash_head);
1153out_unlock:
1154 graph_unlock();
1155restore_irqs:
1156 raw_local_irq_restore(flags);
1157}
1158EXPORT_SYMBOL_GPL(lockdep_register_key);
1159
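/*
 * Illustrative sketch of the dynamic-key API: a caller embedding a
 * lock_class_key in a dynamically allocated object registers it here,
 * points the lock at it with lockdep_set_class(), and unregisters it
 * before freeing the memory (struct bar and the helpers are hypothetical).
 */
#if 0
struct bar {
	spinlock_t		lock;
	struct lock_class_key	key;
};

static void bar_init(struct bar *bar)
{
	lockdep_register_key(&bar->key);
	spin_lock_init(&bar->lock);
	lockdep_set_class(&bar->lock, &bar->key);
}

static void bar_destroy(struct bar *bar)
{
	lockdep_unregister_key(&bar->key);
	kfree(bar);
}
#endif
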
1160/* Check whether a key has been registered as a dynamic key. */
1161static bool is_dynamic_key(const struct lock_class_key *key)
1162{
1163 struct hlist_head *hash_head;
1164 struct lock_class_key *k;
1165 bool found = false;
1166
1167 if (WARN_ON_ONCE(static_obj(key)))
1168 return false;
1169
1170 /*
1171 * If lock debugging is disabled lock_keys_hash[] may contain
1172 * pointers to memory that has already been freed. Avoid triggering
1173 * a use-after-free in that case by returning early.
1174 */
1175 if (!debug_locks)
1176 return true;
1177
1178 hash_head = keyhashentry(key);
1179
1180 rcu_read_lock();
1181 hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1182 if (k == key) {
1183 found = true;
1184 break;
1185 }
1186 }
1187 rcu_read_unlock();
1188
1189 return found;
1190}
1191
1192/*
1193 * Register a lock's class in the hash-table, if the class is not present
1194 * yet. Otherwise we look it up. We cache the result in the lock object
1195 * itself, so the actual hash lookup happens only once per lock object.
1196 */
1197static struct lock_class *
1198register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1199{
1200 struct lockdep_subclass_key *key;
1201 struct hlist_head *hash_head;
1202 struct lock_class *class;
1203
1204 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1205
1206 class = look_up_lock_class(lock, subclass);
1207 if (likely(class))
1208 goto out_set_class_cache;
1209
1210 if (!lock->key) {
1211 if (!assign_lock_key(lock))
1212 return NULL;
1213 } else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
1214 return NULL;
1215 }
1216
1217 key = lock->key->subkeys + subclass;
1218 hash_head = classhashentry(key);
1219
1220 if (!graph_lock()) {
1221 return NULL;
1222 }
1223 /*
1224 * We have to do the hash-walk again, to avoid races
1225 * with another CPU:
1226 */
1227 hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
1228 if (class->key == key)
1229 goto out_unlock_set;
1230 }
1231
1232 init_data_structures_once();
1233
1234 /* Allocate a new lock class and add it to the hash. */
1235 class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
1236 lock_entry);
1237 if (!class) {
1238 if (!debug_locks_off_graph_unlock()) {
1239 return NULL;
1240 }
1241
1242 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
1243 dump_stack();
1244 return NULL;
1245 }
1246 nr_lock_classes++;
1247 __set_bit(class - lock_classes, lock_classes_in_use);
1248 debug_atomic_inc(nr_unused_locks);
1249 class->key = key;
1250 class->name = lock->name;
1251 class->subclass = subclass;
1252 WARN_ON_ONCE(!list_empty(&class->locks_before));
1253 WARN_ON_ONCE(!list_empty(&class->locks_after));
1254 class->name_version = count_matching_names(class);
1255 /*
1256 * We use RCU's safe list-add method to make
1257 * parallel walking of the hash-list safe:
1258 */
1259 hlist_add_head_rcu(&class->hash_entry, hash_head);
1260 /*
1261 * Remove the class from the free list and add it to the global list
1262 * of classes.
1263 */
1264 list_move_tail(&class->lock_entry, &all_lock_classes);
1265
1266 if (verbose(class)) {
1267 graph_unlock();
1268
1269 printk("\nnew class %px: %s", class->key, class->name);
1270 if (class->name_version > 1)
1271 printk(KERN_CONT "#%d", class->name_version);
1272 printk(KERN_CONT "\n");
1273 dump_stack();
1274
1275 if (!graph_lock()) {
1276 return NULL;
1277 }
1278 }
1279out_unlock_set:
1280 graph_unlock();
1281
1282out_set_class_cache:
1283 if (!subclass || force)
1284 lock->class_cache[0] = class;
1285 else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
1286 lock->class_cache[subclass] = class;
1287
1288 /*
1289 * Hash collision, did we smoke some? We found a class with a matching
1290 * hash but the subclass -- which is hashed in -- didn't match.
1291 */
1292 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
1293 return NULL;
1294
1295 return class;
1296}
1297
1298#ifdef CONFIG_PROVE_LOCKING
1299/*
1300 * Allocate a lockdep entry. (Assumes the graph_lock is held; returns
1301 * NULL on failure.)
1302 */
1303static struct lock_list *alloc_list_entry(void)
1304{
1305 int idx = find_first_zero_bit(list_entries_in_use,
1306 ARRAY_SIZE(list_entries));
1307
1308 if (idx >= ARRAY_SIZE(list_entries)) {
1309 if (!debug_locks_off_graph_unlock())
1310 return NULL;
1311
1312 print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
1313 dump_stack();
1314 return NULL;
1315 }
1316 nr_list_entries++;
1317 __set_bit(idx, list_entries_in_use);
1318 return list_entries + idx;
1319}
1320
1321/*
1322 * Add a new dependency to the head of the list:
1323 */
1324static int add_lock_to_list(struct lock_class *this,
1325 struct lock_class *links_to, struct list_head *head,
1326 unsigned long ip, int distance,
1327 const struct lock_trace *trace)
1328{
1329 struct lock_list *entry;
1330 /*
1331 * Lock not present yet - get a new dependency struct and
1332 * add it to the list:
1333 */
1334 entry = alloc_list_entry();
1335 if (!entry)
1336 return 0;
1337
1338 entry->class = this;
1339 entry->links_to = links_to;
1340 entry->distance = distance;
1341 entry->trace = trace;
1342 /*
1343 * Both allocation and removal are done under the graph lock; but
1344 * iteration is under RCU-sched; see look_up_lock_class() and
1345 * lockdep_free_key_range().
1346 */
1347 list_add_tail_rcu(&entry->entry, head);
1348
1349 return 1;
1350}
1351
1352/*
1353 * For efficient modulo arithmetic, we use a power of 2
1354 */
1355#define MAX_CIRCULAR_QUEUE_SIZE 4096UL
1356#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
1357
1358/*
1359 * The circular_queue and helpers are used to implement graph
1360 * breadth-first search (BFS) algorithm, by which we can determine
1361 * whether there is a path from a lock to another. In deadlock checks,
1362 * a path from the next lock to be acquired to a previous held lock
1363 * indicates that adding the <prev> -> <next> lock dependency will
1364 * produce a circle in the graph. Breadth-first search instead of
1365 * depth-first search is used in order to find the shortest (circular)
1366 * path.
1367 */
1368struct circular_queue {
1369 struct lock_list *element[MAX_CIRCULAR_QUEUE_SIZE];
1370 unsigned int front, rear;
1371};
1372
1373static struct circular_queue lock_cq;
1374
1375unsigned int max_bfs_queue_depth;
1376
1377static unsigned int lockdep_dependency_gen_id;
1378
1379static inline void __cq_init(struct circular_queue *cq)
1380{
1381 cq->front = cq->rear = 0;
1382 lockdep_dependency_gen_id++;
1383}
1384
1385static inline int __cq_empty(struct circular_queue *cq)
1386{
1387 return (cq->front == cq->rear);
1388}
1389
1390static inline int __cq_full(struct circular_queue *cq)
1391{
1392 return ((cq->rear + 1) & CQ_MASK) == cq->front;
1393}
1394
1395static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
1396{
1397 if (__cq_full(cq))
1398 return -1;
1399
1400 cq->element[cq->rear] = elem;
1401 cq->rear = (cq->rear + 1) & CQ_MASK;
1402 return 0;
1403}
1404
1405/*
1406 * Dequeue an element from the circular_queue, return a lock_list if
1407 * the queue is not empty, or NULL otherwise.
1408 */
1409static inline struct lock_list * __cq_dequeue(struct circular_queue *cq)
1410{
1411 struct lock_list * lock;
1412
1413 if (__cq_empty(cq))
1414 return NULL;
1415
1416 lock = cq->element[cq->front];
1417 cq->front = (cq->front + 1) & CQ_MASK;
1418
1419 return lock;
1420}
1421
1422static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
1423{
1424 return (cq->rear - cq->front) & CQ_MASK;
1425}
1426
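/*
 * Worked example of the index arithmetic above: because
 * MAX_CIRCULAR_QUEUE_SIZE is a power of two, masking with CQ_MASK (4095)
 * implements the wrap-around cheaply:
 *
 *	rear == 4095:	(4095 + 1) & 4095 == 0		(wraps to the start)
 *	rear == 17:	(17 + 1) & 4095 == 18		(normal advance)
 *
 * and the element count (rear - front) & CQ_MASK stays correct across the
 * wrap, since the subtraction is done in unsigned arithmetic.
 */
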
1427static inline void mark_lock_accessed(struct lock_list *lock,
1428 struct lock_list *parent)
1429{
1430 unsigned long nr;
1431
1432 nr = lock - list_entries;
1433 WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
1434 lock->parent = parent;
1435 lock->class->dep_gen_id = lockdep_dependency_gen_id;
1436}
1437
1438static inline unsigned long lock_accessed(struct lock_list *lock)
1439{
1440 unsigned long nr;
1441
1442 nr = lock - list_entries;
1443 WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
1444 return lock->class->dep_gen_id == lockdep_dependency_gen_id;
1445}
1446
1447static inline struct lock_list *get_lock_parent(struct lock_list *child)
1448{
1449 return child->parent;
1450}
1451
1452static inline int get_lock_depth(struct lock_list *child)
1453{
1454 int depth = 0;
1455 struct lock_list *parent;
1456
1457 while ((parent = get_lock_parent(child))) {
1458 child = parent;
1459 depth++;
1460 }
1461 return depth;
1462}
1463
1464/*
1465 * Return the forward or backward dependency list.
1466 *
1467 * @lock: the lock_list to get its class's dependency list
1468 * @offset: the offset into struct lock_class that selects either
1469 * locks_after or locks_before
1470 */
1471static inline struct list_head *get_dep_list(struct lock_list *lock, int offset)
1472{
1473 void *lock_class = lock->class;
1474
1475 return lock_class + offset;
1476}
1477
1478/*
1479 * Forward- or backward-dependency search, used for both circular dependency
1480 * checking and hardirq-unsafe/softirq-unsafe checking.
1481 */
1482static int __bfs(struct lock_list *source_entry,
1483 void *data,
1484 int (*match)(struct lock_list *entry, void *data),
1485 struct lock_list **target_entry,
1486 int offset)
1487{
1488 struct lock_list *entry;
1489 struct lock_list *lock;
1490 struct list_head *head;
1491 struct circular_queue *cq = &lock_cq;
1492 int ret = 1;
1493
1494 lockdep_assert_locked();
1495
1496 if (match(source_entry, data)) {
1497 *target_entry = source_entry;
1498 ret = 0;
1499 goto exit;
1500 }
1501
1502 head = get_dep_list(source_entry, offset);
1503 if (list_empty(head))
1504 goto exit;
1505
1506 __cq_init(cq);
1507 __cq_enqueue(cq, source_entry);
1508
1509 while ((lock = __cq_dequeue(cq))) {
1510
1511 if (!lock->class) {
1512 ret = -2;
1513 goto exit;
1514 }
1515
1516 head = get_dep_list(lock, offset);
1517
1518 list_for_each_entry_rcu(entry, head, entry) {
1519 if (!lock_accessed(entry)) {
1520 unsigned int cq_depth;
1521 mark_lock_accessed(entry, lock);
1522 if (match(entry, data)) {
1523 *target_entry = entry;
1524 ret = 0;
1525 goto exit;
1526 }
1527
1528 if (__cq_enqueue(cq, entry)) {
1529 ret = -1;
1530 goto exit;
1531 }
1532 cq_depth = __cq_get_elem_count(cq);
1533 if (max_bfs_queue_depth < cq_depth)
1534 max_bfs_queue_depth = cq_depth;
1535 }
1536 }
1537 }
1538exit:
1539 return ret;
1540}
1541
1542static inline int __bfs_forwards(struct lock_list *src_entry,
1543 void *data,
1544 int (*match)(struct lock_list *entry, void *data),
1545 struct lock_list **target_entry)
1546{
1547 return __bfs(src_entry, data, match, target_entry,
1548 offsetof(struct lock_class, locks_after));
1549
1550}
1551
1552static inline int __bfs_backwards(struct lock_list *src_entry,
1553 void *data,
1554 int (*match)(struct lock_list *entry, void *data),
1555 struct lock_list **target_entry)
1556{
1557 return __bfs(src_entry, data, match, target_entry,
1558 offsetof(struct lock_class, locks_before));
1559
1560}
1561
1562static void print_lock_trace(const struct lock_trace *trace,
1563 unsigned int spaces)
1564{
1565 stack_trace_print(trace->entries, trace->nr_entries, spaces);
1566}
1567
1568/*
1569 * Print a dependency chain entry (this is only done when a deadlock
1570 * has been detected):
1571 */
1572static noinline void
1573print_circular_bug_entry(struct lock_list *target, int depth)
1574{
1575 if (debug_locks_silent)
1576 return;
1577 printk("\n-> #%u", depth);
1578 print_lock_name(target->class);
1579 printk(KERN_CONT ":\n");
1580 print_lock_trace(target->trace, 6);
1581}
1582
1583static void
1584print_circular_lock_scenario(struct held_lock *src,
1585 struct held_lock *tgt,
1586 struct lock_list *prt)
1587{
1588 struct lock_class *source = hlock_class(src);
1589 struct lock_class *target = hlock_class(tgt);
1590 struct lock_class *parent = prt->class;
1591
1592 /*
1593 * A direct locking problem where unsafe_class lock is taken
1594 * directly by safe_class lock, then all we need to show
1595 * is the deadlock scenario, as it is obvious that the
1596 * unsafe lock is taken under the safe lock.
1597 *
1598 * But if there is a chain instead, where the safe lock takes
1599 * an intermediate lock (middle_class) where this lock is
1600 * not the same as the safe lock, then the lock chain is
1601 * used to describe the problem. Otherwise we would need
1602 * to show a different CPU case for each link in the chain
1603 * from the safe_class lock to the unsafe_class lock.
1604 */
1605 if (parent != source) {
1606 printk("Chain exists of:\n ");
1607 __print_lock_name(source);
1608 printk(KERN_CONT " --> ");
1609 __print_lock_name(parent);
1610 printk(KERN_CONT " --> ");
1611 __print_lock_name(target);
1612 printk(KERN_CONT "\n\n");
1613 }
1614
1615 printk(" Possible unsafe locking scenario:\n\n");
1616 printk(" CPU0 CPU1\n");
1617 printk(" ---- ----\n");
1618 printk(" lock(");
1619 __print_lock_name(target);
1620 printk(KERN_CONT ");\n");
1621 printk(" lock(");
1622 __print_lock_name(parent);
1623 printk(KERN_CONT ");\n");
1624 printk(" lock(");
1625 __print_lock_name(target);
1626 printk(KERN_CONT ");\n");
1627 printk(" lock(");
1628 __print_lock_name(source);
1629 printk(KERN_CONT ");\n");
1630 printk("\n *** DEADLOCK ***\n\n");
1631}
1632
1633/*
1634 * When a circular dependency is detected, print the
1635 * header first:
1636 */
1637static noinline void
1638print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1639 struct held_lock *check_src,
1640 struct held_lock *check_tgt)
1641{
1642 struct task_struct *curr = current;
1643
1644 if (debug_locks_silent)
1645 return;
1646
1647 pr_warn("\n");
1648 pr_warn("======================================================\n");
1649 pr_warn("WARNING: possible circular locking dependency detected\n");
1650 print_kernel_ident();
1651 pr_warn("------------------------------------------------------\n");
1652 pr_warn("%s/%d is trying to acquire lock:\n",
1653 curr->comm, task_pid_nr(curr));
1654 print_lock(check_src);
1655
1656 pr_warn("\nbut task is already holding lock:\n");
1657
1658 print_lock(check_tgt);
1659 pr_warn("\nwhich lock already depends on the new lock.\n\n");
1660 pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1661
1662 print_circular_bug_entry(entry, depth);
1663}
1664
1665static inline int class_equal(struct lock_list *entry, void *data)
1666{
1667 return entry->class == data;
1668}
1669
1670static noinline void print_circular_bug(struct lock_list *this,
1671 struct lock_list *target,
1672 struct held_lock *check_src,
1673 struct held_lock *check_tgt)
1674{
1675 struct task_struct *curr = current;
1676 struct lock_list *parent;
1677 struct lock_list *first_parent;
1678 int depth;
1679
1680 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1681 return;
1682
1683 this->trace = save_trace();
1684 if (!this->trace)
1685 return;
1686
1687 depth = get_lock_depth(target);
1688
1689 print_circular_bug_header(target, depth, check_src, check_tgt);
1690
1691 parent = get_lock_parent(target);
1692 first_parent = parent;
1693
1694 while (parent) {
1695 print_circular_bug_entry(parent, --depth);
1696 parent = get_lock_parent(parent);
1697 }
1698
1699 printk("\nother info that might help us debug this:\n\n");
1700 print_circular_lock_scenario(check_src, check_tgt,
1701 first_parent);
1702
1703 lockdep_print_held_locks(curr);
1704
1705 printk("\nstack backtrace:\n");
1706 dump_stack();
1707}
1708
1709static noinline void print_bfs_bug(int ret)
1710{
1711 if (!debug_locks_off_graph_unlock())
1712 return;
1713
1714 /*
1715 * Breadth-first-search failed, graph got corrupted?
1716 */
1717 WARN(1, "lockdep bfs error:%d\n", ret);
1718}
1719
1720static int noop_count(struct lock_list *entry, void *data)
1721{
1722 (*(unsigned long *)data)++;
1723 return 0;
1724}
1725
1726static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
1727{
1728 unsigned long count = 0;
1729 struct lock_list *target_entry;
1730
1731 __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1732
1733 return count;
1734}
1735unsigned long lockdep_count_forward_deps(struct lock_class *class)
1736{
1737 unsigned long ret, flags;
1738 struct lock_list this;
1739
1740 this.parent = NULL;
1741 this.class = class;
1742
1743 raw_local_irq_save(flags);
1744 lockdep_lock();
1745 ret = __lockdep_count_forward_deps(&this);
1746 lockdep_unlock();
1747 raw_local_irq_restore(flags);
1748
1749 return ret;
1750}
1751
1752static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1753{
1754 unsigned long count = 0;
1755 struct lock_list *target_entry;
1756
1757 __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1758
1759 return count;
1760}
1761
1762unsigned long lockdep_count_backward_deps(struct lock_class *class)
1763{
1764 unsigned long ret, flags;
1765 struct lock_list this;
1766
1767 this.parent = NULL;
1768 this.class = class;
1769
1770 raw_local_irq_save(flags);
1771 lockdep_lock();
1772 ret = __lockdep_count_backward_deps(&this);
1773 lockdep_unlock();
1774 raw_local_irq_restore(flags);
1775
1776 return ret;
1777}
1778
1779/*
1780 * Check whether the dependency graph starting at <src> can lead to
1781 * <target> or not. Print an error and return 0 if it does.
1782 */
1783static noinline int
1784check_path(struct lock_class *target, struct lock_list *src_entry,
1785 struct lock_list **target_entry)
1786{
1787 int ret;
1788
1789 ret = __bfs_forwards(src_entry, (void *)target, class_equal,
1790 target_entry);
1791
1792 if (unlikely(ret < 0))
1793 print_bfs_bug(ret);
1794
1795 return ret;
1796}
1797
1798/*
1799 * Prove that the dependency graph starting at <src> can not
1800 * lead to <target>. If it can, there is a cycle when adding the
1801 * <target> -> <src> dependency.
1802 *
1803 * Print an error and return 0 if it does.
1804 */
1805static noinline int
1806check_noncircular(struct held_lock *src, struct held_lock *target,
1807 struct lock_trace **const trace)
1808{
1809 int ret;
1810 struct lock_list *target_entry;
1811 struct lock_list src_entry = {
1812 .class = hlock_class(src),
1813 .parent = NULL,
1814 };
1815
1816 debug_atomic_inc(nr_cyclic_checks);
1817
1818 ret = check_path(hlock_class(target), &src_entry, &target_entry);
1819
1820 if (unlikely(!ret)) {
1821 if (!*trace) {
1822 /*
1823 * If save_trace fails here, the printing might
1824 * trigger a WARN but, because of the !nr_entries check, it
1825 * should not do any harm.
1826 */
1827 *trace = save_trace();
1828 }
1829
1830 print_circular_bug(&src_entry, target_entry, src, target);
1831 }
1832
1833 return ret;
1834}
1835
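/*
 * Illustrative sketch of what check_noncircular() catches: with the
 * dependency A -> B already recorded, a code path that nests the locks in
 * the opposite order would add B -> A and close a cycle, so lockdep
 * reports it even if the two paths never actually race (the locks below
 * are hypothetical).
 */
#if 0
	/* path 1: records A -> B */
	spin_lock(&a);
	spin_lock(&b);
	spin_unlock(&b);
	spin_unlock(&a);

	/* path 2: would record B -> A; the circular dependency is reported */
	spin_lock(&b);
	spin_lock(&a);
	spin_unlock(&a);
	spin_unlock(&b);
#endif
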
1836#ifdef CONFIG_LOCKDEP_SMALL
1837/*
1838 * Check whether the dependency graph starting at <src> can lead to
1839 * <target> or not. If it can, <src> -> <target> dependency is already
1840 * in the graph.
1841 *
1842 * Print an error and return 2 if it does or 1 if it does not.
1843 */
1844static noinline int
1845check_redundant(struct held_lock *src, struct held_lock *target)
1846{
1847 int ret;
1848 struct lock_list *target_entry;
1849 struct lock_list src_entry = {
1850 .class = hlock_class(src),
1851 .parent = NULL,
1852 };
1853
1854 debug_atomic_inc(nr_redundant_checks);
1855
1856 ret = check_path(hlock_class(target), &src_entry, &target_entry);
1857
1858 if (!ret) {
1859 debug_atomic_inc(nr_redundant);
1860 ret = 2;
1861 } else if (ret < 0)
1862 ret = 0;
1863
1864 return ret;
1865}
1866#endif
1867
1868#ifdef CONFIG_TRACE_IRQFLAGS
1869
1870static inline int usage_accumulate(struct lock_list *entry, void *mask)
1871{
1872 *(unsigned long *)mask |= entry->class->usage_mask;
1873
1874 return 0;
1875}
1876
1877/*
1878 * Forwards and backwards subgraph searching, for the purposes of
1879 * proving that two subgraphs can be connected by a new dependency
1880 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1881 */
1882
1883static inline int usage_match(struct lock_list *entry, void *mask)
1884{
1885 return entry->class->usage_mask & *(unsigned long *)mask;
1886}
1887
1888/*
1889 * Find a node in the forwards-direction dependency sub-graph starting
1890 * at @root->class that matches @usage_mask.
1891 *
1892 * Return 0 if such a node exists in the subgraph, and put that node
1893 * into *@target_entry.
1894 *
1895 * Return 1 otherwise and keep *@target_entry unchanged.
1896 * Return <0 on error.
1897 */
1898static int
1899find_usage_forwards(struct lock_list *root, unsigned long usage_mask,
1900 struct lock_list **target_entry)
1901{
1902 int result;
1903
1904 debug_atomic_inc(nr_find_usage_forwards_checks);
1905
1906 result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
1907
1908 return result;
1909}
1910
1911/*
1912 * Find a node in the backwards-direction dependency sub-graph starting
1913 * at @root->class that matches @usage_mask.
1914 *
1915 * Return 0 if such a node exists in the subgraph, and put that node
1916 * into *@target_entry.
1917 *
1918 * Return 1 otherwise and keep *@target_entry unchanged.
1919 * Return <0 on error.
1920 */
1921static int
1922find_usage_backwards(struct lock_list *root, unsigned long usage_mask,
1923 struct lock_list **target_entry)
1924{
1925 int result;
1926
1927 debug_atomic_inc(nr_find_usage_backwards_checks);
1928
1929 result = __bfs_backwards(root, &usage_mask, usage_match, target_entry);
1930
1931 return result;
1932}
1933
1934static void print_lock_class_header(struct lock_class *class, int depth)
1935{
1936 int bit;
1937
1938 printk("%*s->", depth, "");
1939 print_lock_name(class);
1940#ifdef CONFIG_DEBUG_LOCKDEP
1941 printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
1942#endif
1943 printk(KERN_CONT " {\n");
1944
1945 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1946 if (class->usage_mask & (1 << bit)) {
1947 int len = depth;
1948
1949 len += printk("%*s %s", depth, "", usage_str[bit]);
1950 len += printk(KERN_CONT " at:\n");
1951 print_lock_trace(class->usage_traces[bit], len);
1952 }
1953 }
1954 printk("%*s }\n", depth, "");
1955
1956 printk("%*s ... key at: [<%px>] %pS\n",
1957 depth, "", class->key, class->key);
1958}
1959
1960/*
1961 * Dependency path printing:
1962 *
1963 * After BFS we get a lock dependency path (linked via ->parent of lock_list),
1964 * printing out each lock in the dependency path will help in understanding how
1965 * the deadlock could happen. Here are some details about dependency path
1966 * printing:
1967 *
1968 * 1) A lock_list can be either forwards or backwards for a lock dependency,
1969 * for a lock dependency A -> B, there are two lock_lists:
1970 *
1971 * a) lock_list in the ->locks_after list of A, whose ->class is B and
1972 * ->links_to is A. In this case, we can say the lock_list is
1973 * "A -> B" (forwards case).
1974 *
1975 * b) lock_list in the ->locks_before list of B, whose ->class is A
1976 * and ->links_to is B. In this case, we can say the lock_list is
1977 * "B <- A" (bacwards case).
1978 *
1979 * The ->trace of both a) and b) point to the call trace where B was
1980 * acquired with A held.
1981 *
1982 * 2) A "helper" lock_list is introduced during BFS; this lock_list doesn't
1983 * represent a particular lock dependency, it only provides an initial entry
1984 * for BFS. For example, BFS may introduce a "helper" lock_list whose
1985 * ->class is A, as a result BFS will search all dependencies starting with
1986 * A, e.g. A -> B or A -> C.
1987 *
1988 * The notation of a forwards helper lock_list is like "-> A", which means
1989 * we should search the forwards dependencies starting with "A", e.g. A -> B
1990 * or A -> C.
1991 *
1992 * The notation of a backwards helper lock_list is like "<- B", which means
1993 * we should search the backwards dependencies ending with "B", e.g.
1994 * B <- A or B <- C.
1995 */
1996
1997/*
1998 * printk the shortest lock dependencies from @root to @leaf in reverse order.
1999 *
2000 * We have a lock dependency path as follows:
2001 *
2002 * @root @leaf
2003 * | |
2004 * V V
2005 * ->parent ->parent
2006 * | lock_list | <--------- | lock_list | ... | lock_list | <--------- | lock_list |
2007 * | -> L1 | | L1 -> L2 | ... |Ln-2 -> Ln-1| | Ln-1 -> Ln|
2008 *
2009 * , so it's natural that we start from @leaf and print every ->class and
2010 * ->trace until we reach the @root.
2011 */
2012static void __used
2013print_shortest_lock_dependencies(struct lock_list *leaf,
2014 struct lock_list *root)
2015{
2016 struct lock_list *entry = leaf;
2017 int depth;
2018
2019 /*compute depth from generated tree by BFS*/
2020 depth = get_lock_depth(leaf);
2021
2022 do {
2023 print_lock_class_header(entry->class, depth);
2024 printk("%*s ... acquired at:\n", depth, "");
2025 print_lock_trace(entry->trace, 2);
2026 printk("\n");
2027
2028 if (depth == 0 && (entry != root)) {
2029 printk("lockdep:%s bad path found in chain graph\n", __func__);
2030 break;
2031 }
2032
2033 entry = get_lock_parent(entry);
2034 depth--;
2035 } while (entry && (depth >= 0));
2036}
2037
2038/*
2039 * printk the shortest lock dependencies from @leaf to @root.
2040 *
2041 * We have a lock dependency path (from a backwards search) as follows:
2042 *
2043 * @leaf @root
2044 * | |
2045 * V V
2046 * ->parent ->parent
2047 * | lock_list | ---------> | lock_list | ... | lock_list | ---------> | lock_list |
2048 * | L2 <- L1 | | L3 <- L2 | ... | Ln <- Ln-1 | | <- Ln |
2049 *
2050 * , so when we iterate from @leaf to @root, we actually print the lock
2051 * dependency path L1 -> L2 -> .. -> Ln in the non-reverse order.
2052 *
2053 * Another thing to notice here is that the ->class of "L2 <- L1" is L1, while
2054 * the ->trace of "L2 <- L1" is the call trace of L2. In fact, we don't have the
2055 * call trace of L1 in the dependency path, which is alright, because most of
2056 * the time we can figure out where L1 is held from the call trace of L2.
2057 */
2058static void __used
2059print_shortest_lock_dependencies_backwards(struct lock_list *leaf,
2060 struct lock_list *root)
2061{
2062 struct lock_list *entry = leaf;
2063 const struct lock_trace *trace = NULL;
2064 int depth;
2065
2066 /* Compute the depth from the tree generated by BFS */
2067 depth = get_lock_depth(leaf);
2068
2069 do {
2070 print_lock_class_header(entry->class, depth);
2071 if (trace) {
2072 printk("%*s ... acquired at:\n", depth, "");
2073 print_lock_trace(trace, 2);
2074 printk("\n");
2075 }
2076
2077 /*
2078 * Record the pointer to the trace for the next lock_list
2079 * entry, see the comments for the function.
2080 */
2081 trace = entry->trace;
2082
2083 if (depth == 0 && (entry != root)) {
2084 printk("lockdep:%s bad path found in chain graph\n", __func__);
2085 break;
2086 }
2087
2088 entry = get_lock_parent(entry);
2089 depth--;
2090 } while (entry && (depth >= 0));
2091}
2092
2093static void
2094print_irq_lock_scenario(struct lock_list *safe_entry,
2095 struct lock_list *unsafe_entry,
2096 struct lock_class *prev_class,
2097 struct lock_class *next_class)
2098{
2099 struct lock_class *safe_class = safe_entry->class;
2100 struct lock_class *unsafe_class = unsafe_entry->class;
2101 struct lock_class *middle_class = prev_class;
2102
2103 if (middle_class == safe_class)
2104 middle_class = next_class;
2105
2106 /*
2107 * A direct locking problem where unsafe_class lock is taken
2108 * directly by safe_class lock, then all we need to show
2109 * is the deadlock scenario, as it is obvious that the
2110 * unsafe lock is taken under the safe lock.
2111 *
2112 * But if there is a chain instead, where the safe lock takes
2113 * an intermediate lock (middle_class) where this lock is
2114 * not the same as the safe lock, then the lock chain is
2115 * used to describe the problem. Otherwise we would need
2116 * to show a different CPU case for each link in the chain
2117 * from the safe_class lock to the unsafe_class lock.
2118 */
2119 if (middle_class != unsafe_class) {
2120 printk("Chain exists of:\n ");
2121 __print_lock_name(safe_class);
2122 printk(KERN_CONT " --> ");
2123 __print_lock_name(middle_class);
2124 printk(KERN_CONT " --> ");
2125 __print_lock_name(unsafe_class);
2126 printk(KERN_CONT "\n\n");
2127 }
2128
2129 printk(" Possible interrupt unsafe locking scenario:\n\n");
2130 printk(" CPU0 CPU1\n");
2131 printk(" ---- ----\n");
2132 printk(" lock(");
2133 __print_lock_name(unsafe_class);
2134 printk(KERN_CONT ");\n");
2135 printk(" local_irq_disable();\n");
2136 printk(" lock(");
2137 __print_lock_name(safe_class);
2138 printk(KERN_CONT ");\n");
2139 printk(" lock(");
2140 __print_lock_name(middle_class);
2141 printk(KERN_CONT ");\n");
2142 printk(" <Interrupt>\n");
2143 printk(" lock(");
2144 __print_lock_name(safe_class);
2145 printk(KERN_CONT ");\n");
2146 printk("\n *** DEADLOCK ***\n\n");
2147}
2148
2149static void
2150print_bad_irq_dependency(struct task_struct *curr,
2151 struct lock_list *prev_root,
2152 struct lock_list *next_root,
2153 struct lock_list *backwards_entry,
2154 struct lock_list *forwards_entry,
2155 struct held_lock *prev,
2156 struct held_lock *next,
2157 enum lock_usage_bit bit1,
2158 enum lock_usage_bit bit2,
2159 const char *irqclass)
2160{
2161 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2162 return;
2163
2164 pr_warn("\n");
2165 pr_warn("=====================================================\n");
2166 pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
2167 irqclass, irqclass);
2168 print_kernel_ident();
2169 pr_warn("-----------------------------------------------------\n");
2170 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
2171 curr->comm, task_pid_nr(curr),
2172 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
2173 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
2174 curr->hardirqs_enabled,
2175 curr->softirqs_enabled);
2176 print_lock(next);
2177
2178 pr_warn("\nand this task is already holding:\n");
2179 print_lock(prev);
2180 pr_warn("which would create a new lock dependency:\n");
2181 print_lock_name(hlock_class(prev));
2182 pr_cont(" ->");
2183 print_lock_name(hlock_class(next));
2184 pr_cont("\n");
2185
2186 pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
2187 irqclass);
2188 print_lock_name(backwards_entry->class);
2189 pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
2190
2191 print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);
2192
2193 pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
2194 print_lock_name(forwards_entry->class);
2195 pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
2196 pr_warn("...");
2197
2198 print_lock_trace(forwards_entry->class->usage_traces[bit2], 1);
2199
2200 pr_warn("\nother info that might help us debug this:\n\n");
2201 print_irq_lock_scenario(backwards_entry, forwards_entry,
2202 hlock_class(prev), hlock_class(next));
2203
2204 lockdep_print_held_locks(curr);
2205
2206 pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
2207 prev_root->trace = save_trace();
2208 if (!prev_root->trace)
2209 return;
2210 print_shortest_lock_dependencies_backwards(backwards_entry, prev_root);
2211
2212 pr_warn("\nthe dependencies between the lock to be acquired");
2213 pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
2214 next_root->trace = save_trace();
2215 if (!next_root->trace)
2216 return;
2217 print_shortest_lock_dependencies(forwards_entry, next_root);
2218
2219 pr_warn("\nstack backtrace:\n");
2220 dump_stack();
2221}
2222
2223static const char *state_names[] = {
2224#define LOCKDEP_STATE(__STATE) \
2225 __stringify(__STATE),
2226#include "lockdep_states.h"
2227#undef LOCKDEP_STATE
2228};
2229
2230static const char *state_rnames[] = {
2231#define LOCKDEP_STATE(__STATE) \
2232 __stringify(__STATE)"-READ",
2233#include "lockdep_states.h"
2234#undef LOCKDEP_STATE
2235};
2236
2237static inline const char *state_name(enum lock_usage_bit bit)
2238{
2239 if (bit & LOCK_USAGE_READ_MASK)
2240 return state_rnames[bit >> LOCK_USAGE_DIR_MASK];
2241 else
2242 return state_names[bit >> LOCK_USAGE_DIR_MASK];
2243}
2244
2245/*
2246 * The bit number is encoded like:
2247 *
2248 * bit0: 0 exclusive, 1 read lock
2249 * bit1: 0 used in irq, 1 irq enabled
2250 * bit2-n: state
2251 */
2252static int exclusive_bit(int new_bit)
2253{
2254 int state = new_bit & LOCK_USAGE_STATE_MASK;
2255 int dir = new_bit & LOCK_USAGE_DIR_MASK;
2256
2257 /*
2258 * keep state, bit flip the direction and strip read.
2259 */
2260 return state | (dir ^ LOCK_USAGE_DIR_MASK);
2261}
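/*
 * A worked example, purely illustrative and assuming the usage-bit layout
 * described above (bit0 = READ, bit1 = direction, the rest = state):
 *
 *	exclusive_bit(LOCK_USED_IN_HARDIRQ)      == LOCK_ENABLED_HARDIRQ
 *	exclusive_bit(LOCK_USED_IN_HARDIRQ_READ) == LOCK_ENABLED_HARDIRQ
 *	exclusive_bit(LOCK_ENABLED_SOFTIRQ)      == LOCK_USED_IN_SOFTIRQ
 *
 * The state is kept, the direction is flipped and READ is dropped, which is
 * exactly the usage that would conflict with the new bit.
 */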
2262
2263/*
2264 * Observe that when given a bitmask where each bitnr is encoded as above, a
2265 * right shift of the mask transforms each individual bitnr by -1 and,
2266 * conversely, a left shift transforms each individual bitnr by +1.
2267 *
2268 * So for all bits whose number has LOCK_ENABLED_* set (bitnr1 == 1), we can
2269 * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0)
2270 * instead by subtracting the bit number by 2, or shifting the mask right by 2.
2271 *
2272 * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2.
2273 *
2274 * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is
2275 * all bits set) and recompose with bitnr1 flipped.
2276 */
2277static unsigned long invert_dir_mask(unsigned long mask)
2278{
2279 unsigned long excl = 0;
2280
2281 /* Invert dir */
2282 excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK;
2283 excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK;
2284
2285 return excl;
2286}
2287
2288/*
2289 * As above, we clear bitnr0 (LOCK_*_READ off) with bitmask ops. First, for all
2290 * bits with bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*).
2291 * And then mask out all bitnr0.
2292 */
2293static unsigned long exclusive_mask(unsigned long mask)
2294{
2295 unsigned long excl = invert_dir_mask(mask);
2296
2297 /* Strip read */
2298 excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK;
2299 excl &= ~LOCKF_IRQ_READ;
2300
2301 return excl;
2302}
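/*
 * A minimal sketch of what the helper above computes, assuming the LOCKF_*
 * masks follow the same bit layout:
 *
 *	exclusive_mask(LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ_READ)
 *		== LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ
 *
 * i.e. every accumulated usage is replaced by the usage that conflicts with
 * it, with the READ variants folded into their write counterparts.
 */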
2303
2304/*
2305 * Retrieve the _possible_ original mask to which @mask is
2306 * exclusive. Ie: this is the opposite of exclusive_mask().
2307 * Note that 2 possible original bits can match an exclusive
2308 * bit: one has LOCK_USAGE_READ_MASK set, the other has it
2309 * cleared. So both are returned for each exclusive bit.
2310 */
2311static unsigned long original_mask(unsigned long mask)
2312{
2313 unsigned long excl = invert_dir_mask(mask);
2314
2315 /* Include read in existing usages */
2316 excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK;
2317
2318 return excl;
2319}
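/*
 * Sketch of the reverse direction, under the same bit-layout assumption:
 *
 *	original_mask(LOCKF_ENABLED_HARDIRQ)
 *		== LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_HARDIRQ_READ
 *
 * so the backwards search will accept both the write and the read variant
 * of the conflicting hardirq-safe usage.
 */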
2320
2321/*
2322 * Find the first pair of bit match between an original
2323 * usage mask and an exclusive usage mask.
2324 */
2325static int find_exclusive_match(unsigned long mask,
2326 unsigned long excl_mask,
2327 enum lock_usage_bit *bitp,
2328 enum lock_usage_bit *excl_bitp)
2329{
2330 int bit, excl;
2331
2332 for_each_set_bit(bit, &mask, LOCK_USED) {
2333 excl = exclusive_bit(bit);
2334 if (excl_mask & lock_flag(excl)) {
2335 *bitp = bit;
2336 *excl_bitp = excl;
2337 return 0;
2338 }
2339 }
2340 return -1;
2341}
2342
2343/*
2344 * Prove that the new dependency does not connect a hardirq-safe(-read)
2345 * lock with a hardirq-unsafe lock - to achieve this we search
2346 * the backwards-subgraph starting at <prev>, and the
2347 * forwards-subgraph starting at <next>:
2348 */
2349static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
2350 struct held_lock *next)
2351{
2352 unsigned long usage_mask = 0, forward_mask, backward_mask;
2353 enum lock_usage_bit forward_bit = 0, backward_bit = 0;
2354 struct lock_list *target_entry1;
2355 struct lock_list *target_entry;
2356 struct lock_list this, that;
2357 int ret;
2358
2359 /*
2360 * Step 1: gather all hard/soft IRQs usages backward in an
2361 * accumulated usage mask.
2362 */
2363 this.parent = NULL;
2364 this.class = hlock_class(prev);
2365
2366 ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL);
2367 if (ret < 0) {
2368 print_bfs_bug(ret);
2369 return 0;
2370 }
2371
2372 usage_mask &= LOCKF_USED_IN_IRQ_ALL;
2373 if (!usage_mask)
2374 return 1;
2375
2376 /*
2377 * Step 2: find exclusive uses forward that match the previous
2378 * backward accumulated mask.
2379 */
2380 forward_mask = exclusive_mask(usage_mask);
2381
2382 that.parent = NULL;
2383 that.class = hlock_class(next);
2384
2385 ret = find_usage_forwards(&that, forward_mask, &target_entry1);
2386 if (ret < 0) {
2387 print_bfs_bug(ret);
2388 return 0;
2389 }
2390 if (ret == 1)
2391 return ret;
2392
2393 /*
2394 * Step 3: we found a bad match! Now retrieve a lock from the backward
2395 * list whose usage mask matches the exclusive usage mask from the
2396 * lock found on the forward list.
2397 *
2398 * Note, we should only keep the LOCKF_ENABLED_IRQ_ALL bits, considering
2399 * the following case:
2400 *
2401 * When trying to add A -> B to the graph, we find that there is a
2402 * hardirq-safe L such that L -> ... -> A, and another hardirq-unsafe M
2403 * such that B -> ... -> M. However, M is **softirq-safe**; if we used the
2404 * exact inverted bits of M's usage_mask, we would find another lock N that
2405 * is **softirq-unsafe** with N -> ... -> A, yet N -> ... -> M would not
2406 * cause an inversion deadlock.
2407 */
2408 backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL);
2409
2410 ret = find_usage_backwards(&this, backward_mask, &target_entry);
2411 if (ret < 0) {
2412 print_bfs_bug(ret);
2413 return 0;
2414 }
2415 if (DEBUG_LOCKS_WARN_ON(ret == 1))
2416 return 1;
2417
2418 /*
2419 * Step 4: narrow down to a pair of incompatible usage bits
2420 * and report it.
2421 */
2422 ret = find_exclusive_match(target_entry->class->usage_mask,
2423 target_entry1->class->usage_mask,
2424 &backward_bit, &forward_bit);
2425 if (DEBUG_LOCKS_WARN_ON(ret == -1))
2426 return 1;
2427
2428 print_bad_irq_dependency(curr, &this, &that,
2429 target_entry, target_entry1,
2430 prev, next,
2431 backward_bit, forward_bit,
2432 state_name(backward_bit));
2433
2434 return 0;
2435}
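/*
 * Hypothetical shape of the bug the four steps above catch: a hardirq-safe
 * lock L with L -> ... -> A, a hardirq-unsafe lock M with B -> ... -> M,
 * and a new dependency A -> B. One CPU can then hold M with hardirqs
 * enabled while another CPU, having taken L with hardirqs disabled, blocks
 * somewhere along L -> ... -> A -> B -> ... -> M; a hard interrupt on the
 * first CPU that tries to take L completes the circular wait.
 */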
2436
2437#else
2438
2439static inline int check_irq_usage(struct task_struct *curr,
2440 struct held_lock *prev, struct held_lock *next)
2441{
2442 return 1;
2443}
2444#endif /* CONFIG_TRACE_IRQFLAGS */
2445
2446static void inc_chains(int irq_context)
2447{
2448 if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
2449 nr_hardirq_chains++;
2450 else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
2451 nr_softirq_chains++;
2452 else
2453 nr_process_chains++;
2454}
2455
2456static void dec_chains(int irq_context)
2457{
2458 if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
2459 nr_hardirq_chains--;
2460 else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
2461 nr_softirq_chains--;
2462 else
2463 nr_process_chains--;
2464}
2465
2466static void
2467print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
2468{
2469 struct lock_class *next = hlock_class(nxt);
2470 struct lock_class *prev = hlock_class(prv);
2471
2472 printk(" Possible unsafe locking scenario:\n\n");
2473 printk(" CPU0\n");
2474 printk(" ----\n");
2475 printk(" lock(");
2476 __print_lock_name(prev);
2477 printk(KERN_CONT ");\n");
2478 printk(" lock(");
2479 __print_lock_name(next);
2480 printk(KERN_CONT ");\n");
2481 printk("\n *** DEADLOCK ***\n\n");
2482 printk(" May be due to missing lock nesting notation\n\n");
2483}
2484
2485static void
2486print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
2487 struct held_lock *next)
2488{
2489 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2490 return;
2491
2492 pr_warn("\n");
2493 pr_warn("============================================\n");
2494 pr_warn("WARNING: possible recursive locking detected\n");
2495 print_kernel_ident();
2496 pr_warn("--------------------------------------------\n");
2497 pr_warn("%s/%d is trying to acquire lock:\n",
2498 curr->comm, task_pid_nr(curr));
2499 print_lock(next);
2500 pr_warn("\nbut task is already holding lock:\n");
2501 print_lock(prev);
2502
2503 pr_warn("\nother info that might help us debug this:\n");
2504 print_deadlock_scenario(next, prev);
2505 lockdep_print_held_locks(curr);
2506
2507 pr_warn("\nstack backtrace:\n");
2508 dump_stack();
2509}
2510
2511/*
2512 * Check whether we are holding such a class already.
2513 *
2514 * (Note that this has to be done separately, because the graph cannot
2515 * detect such classes of deadlocks.)
2516 *
2517 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
2518 */
2519static int
2520check_deadlock(struct task_struct *curr, struct held_lock *next)
2521{
2522 struct held_lock *prev;
2523 struct held_lock *nest = NULL;
2524 int i;
2525
2526 for (i = 0; i < curr->lockdep_depth; i++) {
2527 prev = curr->held_locks + i;
2528
2529 if (prev->instance == next->nest_lock)
2530 nest = prev;
2531
2532 if (hlock_class(prev) != hlock_class(next))
2533 continue;
2534
2535 /*
2536 * Allow read-after-read recursion of the same
2537 * lock class (i.e. read_lock(lock)+read_lock(lock)):
2538 */
2539 if ((next->read == 2) && prev->read)
2540 return 2;
2541
2542 /*
2543 * We're holding the nest_lock, which serializes this lock's
2544 * nesting behaviour.
2545 */
2546 if (nest)
2547 return 2;
2548
2549 print_deadlock_bug(curr, prev, next);
2550 return 0;
2551 }
2552 return 1;
2553}
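/*
 * Purely illustrative (hypothetical code, names made up) examples of the two
 * escapes above. Read-after-read recursion on the same class is allowed:
 *
 *	read_lock(&foo_lock);
 *	read_lock(&foo_lock);		returns 2, no report
 *
 * and so is same-class nesting serialized by an outer lock, provided the
 * nest_lock annotation is used:
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nest_lock(&child_a->mutex, &parent->mutex);
 *	mutex_lock_nest_lock(&child_b->mutex, &parent->mutex);
 *
 * Without the annotation the second same-class acquisition would be reported
 * as possible recursive locking.
 */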
2554
2555/*
2556 * There was a chain-cache miss, and we are about to add a new dependency
2557 * to a previous lock. We validate the following rules:
2558 *
2559 * - would the adding of the <prev> -> <next> dependency create a
2560 * circular dependency in the graph? [== circular deadlock]
2561 *
2562 * - does the new prev->next dependency connect any hardirq-safe lock
2563 * (in the full backwards-subgraph starting at <prev>) with any
2564 * hardirq-unsafe lock (in the full forwards-subgraph starting at
2565 * <next>)? [== illegal lock inversion with hardirq contexts]
2566 *
2567 * - does the new prev->next dependency connect any softirq-safe lock
2568 * (in the full backwards-subgraph starting at <prev>) with any
2569 * softirq-unsafe lock (in the full forwards-subgraph starting at
2570 * <next>)? [== illegal lock inversion with softirq contexts]
2571 *
2572 * any of these scenarios could lead to a deadlock.
2573 *
2574 * Then if all the validations pass, we add the forwards and backwards
2575 * dependency.
2576 */
2577static int
2578check_prev_add(struct task_struct *curr, struct held_lock *prev,
2579 struct held_lock *next, int distance,
2580 struct lock_trace **const trace)
2581{
2582 struct lock_list *entry;
2583 int ret;
2584
2585 if (!hlock_class(prev)->key || !hlock_class(next)->key) {
2586 /*
2587 * The warning statements below may trigger a use-after-free
2588 * of the class name. It is better to trigger a use-after-free
2589 * and to have the class name most of the time instead of not
2590 * having the class name available.
2591 */
2592 WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
2593 "Detected use-after-free of lock class %px/%s\n",
2594 hlock_class(prev),
2595 hlock_class(prev)->name);
2596 WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key,
2597 "Detected use-after-free of lock class %px/%s\n",
2598 hlock_class(next),
2599 hlock_class(next)->name);
2600 return 2;
2601 }
2602
2603 /*
2604 * Prove that the new <prev> -> <next> dependency would not
2605 * create a circular dependency in the graph. (We do this by
2606 * a breadth-first search into the graph starting at <next>,
2607 * and check whether we can reach <prev>.)
2608 *
2609 * The search is limited by the size of the circular queue (i.e.,
2610 * MAX_CIRCULAR_QUEUE_SIZE) which keeps track of a breadth of nodes
2611 * in the graph whose neighbours are to be checked.
2612 */
2613 ret = check_noncircular(next, prev, trace);
2614 if (unlikely(ret <= 0))
2615 return 0;
2616
2617 if (!check_irq_usage(curr, prev, next))
2618 return 0;
2619
2620 /*
2621 * For recursive read-locks we do all the dependency checks,
2622 * but we dont store read-triggered dependencies (only
2623 * write-triggered dependencies). This ensures that only the
2624 * write-side dependencies matter, and that if for example a
2625 * write-lock never takes any other locks, then the reads are
2626 * equivalent to a NOP.
2627 */
2628 if (next->read == 2 || prev->read == 2)
2629 return 1;
2630 /*
2631 * Is the <prev> -> <next> dependency already present?
2632 *
2633 * (this may occur even though this is a new chain: consider
2634 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
2635 * chains - the second one will be new, but L1 already has
2636 * L2 added to its dependency list, due to the first chain.)
2637 */
2638 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
2639 if (entry->class == hlock_class(next)) {
2640 if (distance == 1)
2641 entry->distance = 1;
2642 return 1;
2643 }
2644 }
2645
2646#ifdef CONFIG_LOCKDEP_SMALL
2647 /*
2648 * Is the <prev> -> <next> link redundant?
2649 */
2650 ret = check_redundant(prev, next);
2651 if (ret != 1)
2652 return ret;
2653#endif
2654
2655 if (!*trace) {
2656 *trace = save_trace();
2657 if (!*trace)
2658 return 0;
2659 }
2660
2661 /*
2662 * Ok, all validations passed, add the new lock
2663 * to the previous lock's dependency list:
2664 */
2665 ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
2666 &hlock_class(prev)->locks_after,
2667 next->acquire_ip, distance, *trace);
2668
2669 if (!ret)
2670 return 0;
2671
2672 ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
2673 &hlock_class(next)->locks_before,
2674 next->acquire_ip, distance, *trace);
2675 if (!ret)
2676 return 0;
2677
2678 return 2;
2679}
2680
2681/*
2682 * Add the dependency to all directly-previous locks that are 'relevant'.
2683 * The ones that are relevant are (in increasing distance from curr):
2684 * all consecutive trylock entries and the final non-trylock entry - or
2685 * the end of this context's lock-chain - whichever comes first.
2686 */
2687static int
2688check_prevs_add(struct task_struct *curr, struct held_lock *next)
2689{
2690 struct lock_trace *trace = NULL;
2691 int depth = curr->lockdep_depth;
2692 struct held_lock *hlock;
2693
2694 /*
2695 * Debugging checks.
2696 *
2697 * Depth must not be zero for a non-head lock:
2698 */
2699 if (!depth)
2700 goto out_bug;
2701 /*
2702 * At least two relevant locks must exist for this
2703 * to be a head:
2704 */
2705 if (curr->held_locks[depth].irq_context !=
2706 curr->held_locks[depth-1].irq_context)
2707 goto out_bug;
2708
2709 for (;;) {
2710 int distance = curr->lockdep_depth - depth + 1;
2711 hlock = curr->held_locks + depth - 1;
2712
2713 /*
2714 * Only non-recursive-read entries get new dependencies
2715 * added:
2716 */
2717 if (hlock->read != 2 && hlock->check) {
2718 int ret = check_prev_add(curr, hlock, next, distance,
2719 &trace);
2720 if (!ret)
2721 return 0;
2722
2723 /*
2724 * Stop after the first non-trylock entry,
2725 * as non-trylock entries have added their
2726 * own direct dependencies already, so this
2727 * lock is connected to them indirectly:
2728 */
2729 if (!hlock->trylock)
2730 break;
2731 }
2732
2733 depth--;
2734 /*
2735 * End of lock-stack?
2736 */
2737 if (!depth)
2738 break;
2739 /*
2740 * Stop the search if we cross into another context:
2741 */
2742 if (curr->held_locks[depth].irq_context !=
2743 curr->held_locks[depth-1].irq_context)
2744 break;
2745 }
2746 return 1;
2747out_bug:
2748 if (!debug_locks_off_graph_unlock())
2749 return 0;
2750
2751 /*
2752 * Clearly we all shouldn't be here, but since we made it we
2753 * can reliably say we messed up our state. See the above two
2754 * gotos for reasons why we could possibly end up here.
2755 */
2756 WARN_ON(1);
2757
2758 return 0;
2759}
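/*
 * Example (sketch): with held locks A (plain), B (trylock), C (trylock)
 * and a new acquisition D, the walk above adds C -> D, B -> D and A -> D,
 * then stops at A: whatever was held before A already gained a dependency
 * on A when A was acquired, so it reaches D indirectly through A.
 */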
2760
2761struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
2762static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
2763int nr_chain_hlocks;
2764static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
2765
2766struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
2767{
2768 return lock_classes + chain_hlocks[chain->base + i];
2769}
2770
2771/*
2772 * Returns the index of the first held_lock of the current chain
2773 */
2774static inline int get_first_held_lock(struct task_struct *curr,
2775 struct held_lock *hlock)
2776{
2777 int i;
2778 struct held_lock *hlock_curr;
2779
2780 for (i = curr->lockdep_depth - 1; i >= 0; i--) {
2781 hlock_curr = curr->held_locks + i;
2782 if (hlock_curr->irq_context != hlock->irq_context)
2783 break;
2784
2785 }
2786
2787 return ++i;
2788}
2789
2790#ifdef CONFIG_DEBUG_LOCKDEP
2791/*
2792 * Returns the next chain_key iteration
2793 */
2794static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
2795{
2796 u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
2797
2798 printk(" class_idx:%d -> chain_key:%016Lx",
2799 class_idx,
2800 (unsigned long long)new_chain_key);
2801 return new_chain_key;
2802}
2803
2804static void
2805print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
2806{
2807 struct held_lock *hlock;
2808 u64 chain_key = INITIAL_CHAIN_KEY;
2809 int depth = curr->lockdep_depth;
2810 int i = get_first_held_lock(curr, hlock_next);
2811
2812 printk("depth: %u (irq_context %u)\n", depth - i + 1,
2813 hlock_next->irq_context);
2814 for (; i < depth; i++) {
2815 hlock = curr->held_locks + i;
2816 chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
2817
2818 print_lock(hlock);
2819 }
2820
2821 print_chain_key_iteration(hlock_next->class_idx, chain_key);
2822 print_lock(hlock_next);
2823}
2824
2825static void print_chain_keys_chain(struct lock_chain *chain)
2826{
2827 int i;
2828 u64 chain_key = INITIAL_CHAIN_KEY;
2829 int class_id;
2830
2831 printk("depth: %u\n", chain->depth);
2832 for (i = 0; i < chain->depth; i++) {
2833 class_id = chain_hlocks[chain->base + i];
2834 chain_key = print_chain_key_iteration(class_id, chain_key);
2835
2836 print_lock_name(lock_classes + class_id);
2837 printk("\n");
2838 }
2839}
2840
2841static void print_collision(struct task_struct *curr,
2842 struct held_lock *hlock_next,
2843 struct lock_chain *chain)
2844{
2845 pr_warn("\n");
2846 pr_warn("============================\n");
2847 pr_warn("WARNING: chain_key collision\n");
2848 print_kernel_ident();
2849 pr_warn("----------------------------\n");
2850 pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
2851 pr_warn("Hash chain already cached but the contents don't match!\n");
2852
2853 pr_warn("Held locks:");
2854 print_chain_keys_held_locks(curr, hlock_next);
2855
2856 pr_warn("Locks in cached chain:");
2857 print_chain_keys_chain(chain);
2858
2859 pr_warn("\nstack backtrace:\n");
2860 dump_stack();
2861}
2862#endif
2863
2864/*
2865 * Checks whether the chain and the current held locks are consistent
2866 * in depth and also in content. If they are not, it most likely means
2867 * that there was a collision during the calculation of the chain_key.
2868 * Returns: 0 not passed, 1 passed
2869 */
2870static int check_no_collision(struct task_struct *curr,
2871 struct held_lock *hlock,
2872 struct lock_chain *chain)
2873{
2874#ifdef CONFIG_DEBUG_LOCKDEP
2875 int i, j, id;
2876
2877 i = get_first_held_lock(curr, hlock);
2878
2879 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
2880 print_collision(curr, hlock, chain);
2881 return 0;
2882 }
2883
2884 for (j = 0; j < chain->depth - 1; j++, i++) {
2885 id = curr->held_locks[i].class_idx;
2886
2887 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
2888 print_collision(curr, hlock, chain);
2889 return 0;
2890 }
2891 }
2892#endif
2893 return 1;
2894}
2895
2896/*
2897 * Given an index that is >= -1, return the index of the next lock chain.
2898 * Return -2 if there is no next lock chain.
2899 */
2900long lockdep_next_lockchain(long i)
2901{
2902 i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
2903 return i < ARRAY_SIZE(lock_chains) ? i : -2;
2904}
2905
2906unsigned long lock_chain_count(void)
2907{
2908 return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
2909}
2910
2911/* Must be called with the graph lock held. */
2912static struct lock_chain *alloc_lock_chain(void)
2913{
2914 int idx = find_first_zero_bit(lock_chains_in_use,
2915 ARRAY_SIZE(lock_chains));
2916
2917 if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
2918 return NULL;
2919 __set_bit(idx, lock_chains_in_use);
2920 return lock_chains + idx;
2921}
2922
2923/*
2924 * Adds a dependency chain to the chain hashtable. Must be called with
2925 * graph_lock held.
2926 *
2927 * Return 0 on failure; graph_lock is released.
2928 * Return 1 on success, with graph_lock held.
2929 */
2930static inline int add_chain_cache(struct task_struct *curr,
2931 struct held_lock *hlock,
2932 u64 chain_key)
2933{
2934 struct lock_class *class = hlock_class(hlock);
2935 struct hlist_head *hash_head = chainhashentry(chain_key);
2936 struct lock_chain *chain;
2937 int i, j;
2938
2939 /*
2940 * The caller must hold the graph lock, ensure we've got IRQs
2941 * disabled to make this an IRQ-safe lock.. for recursion reasons
2942 * lockdep won't complain about its own locking errors.
2943 */
2944 if (lockdep_assert_locked())
2945 return 0;
2946
2947 chain = alloc_lock_chain();
2948 if (!chain) {
2949 if (!debug_locks_off_graph_unlock())
2950 return 0;
2951
2952 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2953 dump_stack();
2954 return 0;
2955 }
2956 chain->chain_key = chain_key;
2957 chain->irq_context = hlock->irq_context;
2958 i = get_first_held_lock(curr, hlock);
2959 chain->depth = curr->lockdep_depth + 1 - i;
2960
2961 BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
2962 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
2963 BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
2964
2965 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2966 chain->base = nr_chain_hlocks;
2967 for (j = 0; j < chain->depth - 1; j++, i++) {
2968 int lock_id = curr->held_locks[i].class_idx;
2969 chain_hlocks[chain->base + j] = lock_id;
2970 }
2971 chain_hlocks[chain->base + j] = class - lock_classes;
2972 nr_chain_hlocks += chain->depth;
2973 } else {
2974 if (!debug_locks_off_graph_unlock())
2975 return 0;
2976
2977 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2978 dump_stack();
2979 return 0;
2980 }
2981
2982 hlist_add_head_rcu(&chain->entry, hash_head);
2983 debug_atomic_inc(chain_lookup_misses);
2984 inc_chains(chain->irq_context);
2985
2986 return 1;
2987}
2988
2989/*
2990 * Look up a dependency chain. Must be called with either the graph lock or
2991 * the RCU read lock held.
2992 */
2993static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
2994{
2995 struct hlist_head *hash_head = chainhashentry(chain_key);
2996 struct lock_chain *chain;
2997
2998 hlist_for_each_entry_rcu(chain, hash_head, entry) {
2999 if (READ_ONCE(chain->chain_key) == chain_key) {
3000 debug_atomic_inc(chain_lookup_hits);
3001 return chain;
3002 }
3003 }
3004 return NULL;
3005}
3006
3007/*
3008 * If the key is not present yet in dependency chain cache then
3009 * add it and return 1 - in this case the new dependency chain is
3010 * validated. If the key is already hashed, return 0.
3011 * (On return with 1 graph_lock is held.)
3012 */
3013static inline int lookup_chain_cache_add(struct task_struct *curr,
3014 struct held_lock *hlock,
3015 u64 chain_key)
3016{
3017 struct lock_class *class = hlock_class(hlock);
3018 struct lock_chain *chain = lookup_chain_cache(chain_key);
3019
3020 if (chain) {
3021cache_hit:
3022 if (!check_no_collision(curr, hlock, chain))
3023 return 0;
3024
3025 if (very_verbose(class)) {
3026 printk("\nhash chain already cached, key: "
3027 "%016Lx tail class: [%px] %s\n",
3028 (unsigned long long)chain_key,
3029 class->key, class->name);
3030 }
3031
3032 return 0;
3033 }
3034
3035 if (very_verbose(class)) {
3036 printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
3037 (unsigned long long)chain_key, class->key, class->name);
3038 }
3039
3040 if (!graph_lock())
3041 return 0;
3042
3043 /*
3044 * We have to walk the chain again locked - to avoid duplicates:
3045 */
3046 chain = lookup_chain_cache(chain_key);
3047 if (chain) {
3048 graph_unlock();
3049 goto cache_hit;
3050 }
3051
3052 if (!add_chain_cache(curr, hlock, chain_key))
3053 return 0;
3054
3055 return 1;
3056}
3057
3058static int validate_chain(struct task_struct *curr,
3059 struct held_lock *hlock,
3060 int chain_head, u64 chain_key)
3061{
3062 /*
3063 * Trylock needs to maintain the stack of held locks, but it
3064 * does not add new dependencies, because trylock can be done
3065 * in any order.
3066 *
3067 * We look up the chain_key and do the O(N^2) check and update of
3068 * the dependencies only if this is a new dependency chain.
3069 * (If lookup_chain_cache_add() returns with 1 it acquires
3070 * graph_lock for us)
3071 */
3072 if (!hlock->trylock && hlock->check &&
3073 lookup_chain_cache_add(curr, hlock, chain_key)) {
3074 /*
3075 * Check whether last held lock:
3076 *
3077 * - is irq-safe, if this lock is irq-unsafe
3078 * - is softirq-safe, if this lock is hardirq-unsafe
3079 *
3080 * And check whether the new lock's dependency graph
3081 * could lead back to the previous lock:
3082 *
3083 * - within the current held-lock stack
3084 * - across our accumulated lock dependency records
3085 *
3086 * any of these scenarios could lead to a deadlock.
3087 */
3088 /*
3089 * The simple case: does the current task hold the same lock
3090 * already?
3091 */
3092 int ret = check_deadlock(curr, hlock);
3093
3094 if (!ret)
3095 return 0;
3096 /*
3097 * Mark recursive read, as we jump over it when
3098 * building dependencies (just like we jump over
3099 * trylock entries):
3100 */
3101 if (ret == 2)
3102 hlock->read = 2;
3103 /*
3104 * Add dependency only if this lock is not the head
3105 * of the chain, and if it's not a secondary read-lock:
3106 */
3107 if (!chain_head && ret != 2) {
3108 if (!check_prevs_add(curr, hlock))
3109 return 0;
3110 }
3111
3112 graph_unlock();
3113 } else {
3114 /* after lookup_chain_cache_add(): */
3115 if (unlikely(!debug_locks))
3116 return 0;
3117 }
3118
3119 return 1;
3120}
3121#else
3122static inline int validate_chain(struct task_struct *curr,
3123 struct held_lock *hlock,
3124 int chain_head, u64 chain_key)
3125{
3126 return 1;
3127}
3128#endif /* CONFIG_PROVE_LOCKING */
3129
3130/*
3131 * We are building curr_chain_key incrementally, so double-check
3132 * it from scratch, to make sure that it's done correctly:
3133 */
3134static void check_chain_key(struct task_struct *curr)
3135{
3136#ifdef CONFIG_DEBUG_LOCKDEP
3137 struct held_lock *hlock, *prev_hlock = NULL;
3138 unsigned int i;
3139 u64 chain_key = INITIAL_CHAIN_KEY;
3140
3141 for (i = 0; i < curr->lockdep_depth; i++) {
3142 hlock = curr->held_locks + i;
3143 if (chain_key != hlock->prev_chain_key) {
3144 debug_locks_off();
3145 /*
3146 * We got mighty confused, our chain keys don't match
3147 * with what we expect, did someone trample on our task state?
3148 */
3149 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
3150 curr->lockdep_depth, i,
3151 (unsigned long long)chain_key,
3152 (unsigned long long)hlock->prev_chain_key);
3153 return;
3154 }
3155
3156 /*
3157 * hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is
3158 * it a registered lock class index?
3159 */
3160 if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use)))
3161 return;
3162
3163 if (prev_hlock && (prev_hlock->irq_context !=
3164 hlock->irq_context))
3165 chain_key = INITIAL_CHAIN_KEY;
3166 chain_key = iterate_chain_key(chain_key, hlock->class_idx);
3167 prev_hlock = hlock;
3168 }
3169 if (chain_key != curr->curr_chain_key) {
3170 debug_locks_off();
3171 /*
3172 * More smoking hash instead of calculating it, damn see these
3173 * numbers float.. I bet that a pink elephant stepped on my memory.
3174 */
3175 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
3176 curr->lockdep_depth, i,
3177 (unsigned long long)chain_key,
3178 (unsigned long long)curr->curr_chain_key);
3179 }
3180#endif
3181}
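/*
 * Put differently (a sketch of the expected value): for held classes
 * c1, c2, c3 acquired in that order within one irq_context, the key must be
 *
 *	iterate_chain_key(iterate_chain_key(iterate_chain_key(
 *		INITIAL_CHAIN_KEY, c1), c2), c3)
 *
 * and it is re-seeded with INITIAL_CHAIN_KEY whenever the irq_context
 * changes, mirroring what __lock_acquire() computes incrementally.
 */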
3182
3183#ifdef CONFIG_PROVE_LOCKING
3184static int mark_lock(struct task_struct *curr, struct held_lock *this,
3185 enum lock_usage_bit new_bit);
3186
3187static void print_usage_bug_scenario(struct held_lock *lock)
3188{
3189 struct lock_class *class = hlock_class(lock);
3190
3191 printk(" Possible unsafe locking scenario:\n\n");
3192 printk(" CPU0\n");
3193 printk(" ----\n");
3194 printk(" lock(");
3195 __print_lock_name(class);
3196 printk(KERN_CONT ");\n");
3197 printk(" <Interrupt>\n");
3198 printk(" lock(");
3199 __print_lock_name(class);
3200 printk(KERN_CONT ");\n");
3201 printk("\n *** DEADLOCK ***\n\n");
3202}
3203
3204static void
3205print_usage_bug(struct task_struct *curr, struct held_lock *this,
3206 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
3207{
3208 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
3209 return;
3210
3211 pr_warn("\n");
3212 pr_warn("================================\n");
3213 pr_warn("WARNING: inconsistent lock state\n");
3214 print_kernel_ident();
3215 pr_warn("--------------------------------\n");
3216
3217 pr_warn("inconsistent {%s} -> {%s} usage.\n",
3218 usage_str[prev_bit], usage_str[new_bit]);
3219
3220 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
3221 curr->comm, task_pid_nr(curr),
3222 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
3223 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
3224 trace_hardirqs_enabled(curr),
3225 trace_softirqs_enabled(curr));
3226 print_lock(this);
3227
3228 pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
3229 print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1);
3230
3231 print_irqtrace_events(curr);
3232 pr_warn("\nother info that might help us debug this:\n");
3233 print_usage_bug_scenario(this);
3234
3235 lockdep_print_held_locks(curr);
3236
3237 pr_warn("\nstack backtrace:\n");
3238 dump_stack();
3239}
3240
3241/*
3242 * Print out an error if an invalid bit is set:
3243 */
3244static inline int
3245valid_state(struct task_struct *curr, struct held_lock *this,
3246 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
3247{
3248 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
3249 print_usage_bug(curr, this, bad_bit, new_bit);
3250 return 0;
3251 }
3252 return 1;
3253}
3254
3255
3256/*
3257 * print irq inversion bug:
3258 */
3259static void
3260print_irq_inversion_bug(struct task_struct *curr,
3261 struct lock_list *root, struct lock_list *other,
3262 struct held_lock *this, int forwards,
3263 const char *irqclass)
3264{
3265 struct lock_list *entry = other;
3266 struct lock_list *middle = NULL;
3267 int depth;
3268
3269 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
3270 return;
3271
3272 pr_warn("\n");
3273 pr_warn("========================================================\n");
3274 pr_warn("WARNING: possible irq lock inversion dependency detected\n");
3275 print_kernel_ident();
3276 pr_warn("--------------------------------------------------------\n");
3277 pr_warn("%s/%d just changed the state of lock:\n",
3278 curr->comm, task_pid_nr(curr));
3279 print_lock(this);
3280 if (forwards)
3281 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
3282 else
3283 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
3284 print_lock_name(other->class);
3285 pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
3286
3287 pr_warn("\nother info that might help us debug this:\n");
3288
3289 /* Find a middle lock (if one exists) */
3290 depth = get_lock_depth(other);
3291 do {
3292 if (depth == 0 && (entry != root)) {
3293 pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
3294 break;
3295 }
3296 middle = entry;
3297 entry = get_lock_parent(entry);
3298 depth--;
3299 } while (entry && entry != root && (depth >= 0));
3300 if (forwards)
3301 print_irq_lock_scenario(root, other,
3302 middle ? middle->class : root->class, other->class);
3303 else
3304 print_irq_lock_scenario(other, root,
3305 middle ? middle->class : other->class, root->class);
3306
3307 lockdep_print_held_locks(curr);
3308
3309 pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
3310 root->trace = save_trace();
3311 if (!root->trace)
3312 return;
3313 print_shortest_lock_dependencies(other, root);
3314
3315 pr_warn("\nstack backtrace:\n");
3316 dump_stack();
3317}
3318
3319/*
3320 * Prove that in the forwards-direction subgraph starting at <this>
3321 * there is no lock matching <mask>:
3322 */
3323static int
3324check_usage_forwards(struct task_struct *curr, struct held_lock *this,
3325 enum lock_usage_bit bit, const char *irqclass)
3326{
3327 int ret;
3328 struct lock_list root;
3329 struct lock_list *uninitialized_var(target_entry);
3330
3331 root.parent = NULL;
3332 root.class = hlock_class(this);
3333 ret = find_usage_forwards(&root, lock_flag(bit), &target_entry);
3334 if (ret < 0) {
3335 print_bfs_bug(ret);
3336 return 0;
3337 }
3338 if (ret == 1)
3339 return ret;
3340
3341 print_irq_inversion_bug(curr, &root, target_entry,
3342 this, 1, irqclass);
3343 return 0;
3344}
3345
3346/*
3347 * Prove that in the backwards-direction subgraph starting at <this>
3348 * there is no lock matching <mask>:
3349 */
3350static int
3351check_usage_backwards(struct task_struct *curr, struct held_lock *this,
3352 enum lock_usage_bit bit, const char *irqclass)
3353{
3354 int ret;
3355 struct lock_list root;
3356 struct lock_list *target_entry;
3357
3358 root.parent = NULL;
3359 root.class = hlock_class(this);
3360 ret = find_usage_backwards(&root, lock_flag(bit), &target_entry);
3361 if (ret < 0) {
3362 print_bfs_bug(ret);
3363 return 0;
3364 }
3365 if (ret == 1)
3366 return ret;
3367
3368 print_irq_inversion_bug(curr, &root, target_entry,
3369 this, 0, irqclass);
3370 return 0;
3371}
3372
3373void print_irqtrace_events(struct task_struct *curr)
3374{
3375 printk("irq event stamp: %u\n", curr->irq_events);
3376 printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
3377 curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
3378 (void *)curr->hardirq_enable_ip);
3379 printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
3380 curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
3381 (void *)curr->hardirq_disable_ip);
3382 printk("softirqs last enabled at (%u): [<%px>] %pS\n",
3383 curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
3384 (void *)curr->softirq_enable_ip);
3385 printk("softirqs last disabled at (%u): [<%px>] %pS\n",
3386 curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
3387 (void *)curr->softirq_disable_ip);
3388}
3389
3390static int HARDIRQ_verbose(struct lock_class *class)
3391{
3392#if HARDIRQ_VERBOSE
3393 return class_filter(class);
3394#endif
3395 return 0;
3396}
3397
3398static int SOFTIRQ_verbose(struct lock_class *class)
3399{
3400#if SOFTIRQ_VERBOSE
3401 return class_filter(class);
3402#endif
3403 return 0;
3404}
3405
3406#define STRICT_READ_CHECKS 1
3407
3408static int (*state_verbose_f[])(struct lock_class *class) = {
3409#define LOCKDEP_STATE(__STATE) \
3410 __STATE##_verbose,
3411#include "lockdep_states.h"
3412#undef LOCKDEP_STATE
3413};
3414
3415static inline int state_verbose(enum lock_usage_bit bit,
3416 struct lock_class *class)
3417{
3418 return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class);
3419}
3420
3421typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
3422 enum lock_usage_bit bit, const char *name);
3423
3424static int
3425mark_lock_irq(struct task_struct *curr, struct held_lock *this,
3426 enum lock_usage_bit new_bit)
3427{
3428 int excl_bit = exclusive_bit(new_bit);
3429 int read = new_bit & LOCK_USAGE_READ_MASK;
3430 int dir = new_bit & LOCK_USAGE_DIR_MASK;
3431
3432 /*
3433 * mark USED_IN has to look forwards -- to ensure no dependency
3434 * has ENABLED state, which would allow recursion deadlocks.
3435 *
3436 * mark ENABLED has to look backwards -- to ensure no dependee
3437 * has USED_IN state, which, again, would allow recursion deadlocks.
3438 */
3439 check_usage_f usage = dir ?
3440 check_usage_backwards : check_usage_forwards;
3441
3442 /*
3443 * Validate that this particular lock does not have conflicting
3444 * usage states.
3445 */
3446 if (!valid_state(curr, this, new_bit, excl_bit))
3447 return 0;
3448
3449 /*
3450 * Validate that the lock dependencies don't have conflicting usage
3451 * states.
3452 */
3453 if ((!read || STRICT_READ_CHECKS) &&
3454 !usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
3455 return 0;
3456
3457 /*
3458 * Check for read in write conflicts
3459 */
3460 if (!read) {
3461 if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK))
3462 return 0;
3463
3464 if (STRICT_READ_CHECKS &&
3465 !usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
3466 state_name(new_bit + LOCK_USAGE_READ_MASK)))
3467 return 0;
3468 }
3469
3470 if (state_verbose(new_bit, hlock_class(this)))
3471 return 2;
3472
3473 return 1;
3474}
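/*
 * Concrete reading of the above (illustrative): marking a lock
 * LOCK_USED_IN_HARDIRQ walks the forwards graph and reports any reachable
 * class already marked LOCK_ENABLED_HARDIRQ (plus the READ variant under
 * STRICT_READ_CHECKS), while marking LOCK_ENABLED_HARDIRQ walks backwards
 * looking for LOCK_USED_IN_HARDIRQ, on top of the valid_state() checks on
 * the lock's own usage mask.
 */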
3475
3476/*
3477 * Mark all held locks with a usage bit:
3478 */
3479static int
3480mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
3481{
3482 struct held_lock *hlock;
3483 int i;
3484
3485 for (i = 0; i < curr->lockdep_depth; i++) {
3486 enum lock_usage_bit hlock_bit = base_bit;
3487 hlock = curr->held_locks + i;
3488
3489 if (hlock->read)
3490 hlock_bit += LOCK_USAGE_READ_MASK;
3491
3492 BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
3493
3494 if (!hlock->check)
3495 continue;
3496
3497 if (!mark_lock(curr, hlock, hlock_bit))
3498 return 0;
3499 }
3500
3501 return 1;
3502}
3503
3504/*
3505 * Hardirqs will be enabled:
3506 */
3507static void __trace_hardirqs_on_caller(unsigned long ip)
3508{
3509 struct task_struct *curr = current;
3510
3511 /* we'll do an OFF -> ON transition: */
3512 curr->hardirqs_enabled = 1;
3513
3514 /*
3515 * We are going to turn hardirqs on, so set the
3516 * usage bit for all held locks:
3517 */
3518 if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
3519 return;
3520 /*
3521 * If we have softirqs enabled, then set the usage
3522 * bit for all held locks. (disabled hardirqs prevented
3523 * this bit from being set before)
3524 */
3525 if (curr->softirqs_enabled)
3526 if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ))
3527 return;
3528
3529 curr->hardirq_enable_ip = ip;
3530 curr->hardirq_enable_event = ++curr->irq_events;
3531 debug_atomic_inc(hardirqs_on_events);
3532}
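/*
 * Illustrative trigger (hypothetical code): a sequence such as
 *
 *	local_irq_disable();
 *	spin_lock(&a);
 *	local_irq_enable();
 *
 * reaches this point from the irq-enable path and marks 'a' with
 * LOCK_ENABLED_HARDIRQ (and LOCK_ENABLED_SOFTIRQ if softirqs are enabled),
 * which is what later allows the irq-safe/irq-unsafe checks to flag 'a'
 * if it is ever taken from hard interrupt context as well.
 */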
3533
3534void lockdep_hardirqs_on(unsigned long ip)
3535{
3536 if (unlikely(!debug_locks || current->lockdep_recursion))
3537 return;
3538
3539 if (unlikely(current->hardirqs_enabled)) {
3540 /*
3541 * Neither irq nor preemption are disabled here
3542 * so this is racy by nature but losing one hit
3543 * in a stat is not a big deal.
3544 */
3545 __debug_atomic_inc(redundant_hardirqs_on);
3546 return;
3547 }
3548
3549 /*
3550 * We're enabling irqs and according to our state above irqs weren't
3551 * already enabled, yet we find the hardware thinks they are in fact
3552 * enabled.. someone messed up their IRQ state tracing.
3553 */
3554 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3555 return;
3556
3557 /*
3558 * See the fine text that goes along with this variable definition.
3559 */
3560 if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
3561 return;
3562
3563 /*
3564 * Can't allow enabling interrupts while in an interrupt handler,
3565 * that's general bad form and such. Recursion, limited stack etc..
3566 */
3567 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
3568 return;
3569
3570 current->lockdep_recursion++;
3571 __trace_hardirqs_on_caller(ip);
3572 lockdep_recursion_finish();
3573}
3574NOKPROBE_SYMBOL(lockdep_hardirqs_on);
3575
3576/*
3577 * Hardirqs were disabled:
3578 */
3579void lockdep_hardirqs_off(unsigned long ip)
3580{
3581 struct task_struct *curr = current;
3582
3583 if (unlikely(!debug_locks || current->lockdep_recursion))
3584 return;
3585
3586 /*
3587 * So we're supposed to get called after you mask local IRQs, but for
3588 * some reason the hardware doesn't quite think you did a proper job.
3589 */
3590 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3591 return;
3592
3593 if (curr->hardirqs_enabled) {
3594 /*
3595 * We have done an ON -> OFF transition:
3596 */
3597 curr->hardirqs_enabled = 0;
3598 curr->hardirq_disable_ip = ip;
3599 curr->hardirq_disable_event = ++curr->irq_events;
3600 debug_atomic_inc(hardirqs_off_events);
3601 } else
3602 debug_atomic_inc(redundant_hardirqs_off);
3603}
3604NOKPROBE_SYMBOL(lockdep_hardirqs_off);
3605
3606/*
3607 * Softirqs will be enabled:
3608 */
3609void trace_softirqs_on(unsigned long ip)
3610{
3611 struct task_struct *curr = current;
3612
3613 if (unlikely(!debug_locks || current->lockdep_recursion))
3614 return;
3615
3616 /*
3617 * We fancy IRQs being disabled here, see softirq.c, avoids
3618 * funny state and nesting things.
3619 */
3620 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3621 return;
3622
3623 if (curr->softirqs_enabled) {
3624 debug_atomic_inc(redundant_softirqs_on);
3625 return;
3626 }
3627
3628 current->lockdep_recursion++;
3629 /*
3630 * We'll do an OFF -> ON transition:
3631 */
3632 curr->softirqs_enabled = 1;
3633 curr->softirq_enable_ip = ip;
3634 curr->softirq_enable_event = ++curr->irq_events;
3635 debug_atomic_inc(softirqs_on_events);
3636 /*
3637 * We are going to turn softirqs on, so set the
3638 * usage bit for all held locks, if hardirqs are
3639 * enabled too:
3640 */
3641 if (curr->hardirqs_enabled)
3642 mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
3643 lockdep_recursion_finish();
3644}
3645
3646/*
3647 * Softirqs were disabled:
3648 */
3649void trace_softirqs_off(unsigned long ip)
3650{
3651 struct task_struct *curr = current;
3652
3653 if (unlikely(!debug_locks || current->lockdep_recursion))
3654 return;
3655
3656 /*
3657 * We fancy IRQs being disabled here, see softirq.c
3658 */
3659 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3660 return;
3661
3662 if (curr->softirqs_enabled) {
3663 /*
3664 * We have done an ON -> OFF transition:
3665 */
3666 curr->softirqs_enabled = 0;
3667 curr->softirq_disable_ip = ip;
3668 curr->softirq_disable_event = ++curr->irq_events;
3669 debug_atomic_inc(softirqs_off_events);
3670 /*
3671 * Whoops, we wanted softirqs off, so why aren't they?
3672 */
3673 DEBUG_LOCKS_WARN_ON(!softirq_count());
3674 } else
3675 debug_atomic_inc(redundant_softirqs_off);
3676}
3677
3678static int
3679mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
3680{
3681 if (!check)
3682 goto lock_used;
3683
3684 /*
3685 * If non-trylock use in a hardirq or softirq context, then
3686 * mark the lock as used in these contexts:
3687 */
3688 if (!hlock->trylock) {
3689 if (hlock->read) {
3690 if (curr->hardirq_context)
3691 if (!mark_lock(curr, hlock,
3692 LOCK_USED_IN_HARDIRQ_READ))
3693 return 0;
3694 if (curr->softirq_context)
3695 if (!mark_lock(curr, hlock,
3696 LOCK_USED_IN_SOFTIRQ_READ))
3697 return 0;
3698 } else {
3699 if (curr->hardirq_context)
3700 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
3701 return 0;
3702 if (curr->softirq_context)
3703 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
3704 return 0;
3705 }
3706 }
3707 if (!hlock->hardirqs_off) {
3708 if (hlock->read) {
3709 if (!mark_lock(curr, hlock,
3710 LOCK_ENABLED_HARDIRQ_READ))
3711 return 0;
3712 if (curr->softirqs_enabled)
3713 if (!mark_lock(curr, hlock,
3714 LOCK_ENABLED_SOFTIRQ_READ))
3715 return 0;
3716 } else {
3717 if (!mark_lock(curr, hlock,
3718 LOCK_ENABLED_HARDIRQ))
3719 return 0;
3720 if (curr->softirqs_enabled)
3721 if (!mark_lock(curr, hlock,
3722 LOCK_ENABLED_SOFTIRQ))
3723 return 0;
3724 }
3725 }
3726
3727lock_used:
3728 /* mark it as used: */
3729 if (!mark_lock(curr, hlock, LOCK_USED))
3730 return 0;
3731
3732 return 1;
3733}
3734
3735static inline unsigned int task_irq_context(struct task_struct *task)
3736{
3737 return LOCK_CHAIN_HARDIRQ_CONTEXT * !!task->hardirq_context +
3738 LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
3739}
3740
3741static int separate_irq_context(struct task_struct *curr,
3742 struct held_lock *hlock)
3743{
3744 unsigned int depth = curr->lockdep_depth;
3745
3746 /*
3747 * Keep track of points where we cross into an interrupt context:
3748 */
3749 if (depth) {
3750 struct held_lock *prev_hlock;
3751
3752 prev_hlock = curr->held_locks + depth-1;
3753 /*
3754 * If we cross into another context, reset the
3755 * hash key (this also prevents the checking and the
3756 * adding of the dependency to 'prev'):
3757 */
3758 if (prev_hlock->irq_context != hlock->irq_context)
3759 return 1;
3760 }
3761 return 0;
3762}
3763
3764/*
3765 * Mark a lock with a usage bit, and validate the state transition:
3766 */
3767static int mark_lock(struct task_struct *curr, struct held_lock *this,
3768 enum lock_usage_bit new_bit)
3769{
3770 unsigned int new_mask = 1 << new_bit, ret = 1;
3771
3772 if (new_bit >= LOCK_USAGE_STATES) {
3773 DEBUG_LOCKS_WARN_ON(1);
3774 return 0;
3775 }
3776
3777 /*
3778 * If already set then do not dirty the cacheline,
3779 * nor do any checks:
3780 */
3781 if (likely(hlock_class(this)->usage_mask & new_mask))
3782 return 1;
3783
3784 if (!graph_lock())
3785 return 0;
3786 /*
3787 * Make sure we didn't race:
3788 */
3789 if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
3790 graph_unlock();
3791 return 1;
3792 }
3793
3794 hlock_class(this)->usage_mask |= new_mask;
3795
3796 if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
3797 return 0;
3798
3799 switch (new_bit) {
3800 case LOCK_USED:
3801 debug_atomic_dec(nr_unused_locks);
3802 break;
3803 default:
3804 ret = mark_lock_irq(curr, this, new_bit);
3805 if (!ret)
3806 return 0;
3807 }
3808
3809 graph_unlock();
3810
3811 /*
3812 * We must printk outside of the graph_lock:
3813 */
3814 if (ret == 2) {
3815 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
3816 print_lock(this);
3817 print_irqtrace_events(curr);
3818 dump_stack();
3819 }
3820
3821 return ret;
3822}
3823
3824#else /* CONFIG_PROVE_LOCKING */
3825
3826static inline int
3827mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
3828{
3829 return 1;
3830}
3831
3832static inline unsigned int task_irq_context(struct task_struct *task)
3833{
3834 return 0;
3835}
3836
3837static inline int separate_irq_context(struct task_struct *curr,
3838 struct held_lock *hlock)
3839{
3840 return 0;
3841}
3842
3843#endif /* CONFIG_PROVE_LOCKING */
3844
3845/*
3846 * Initialize a lock instance's lock-class mapping info:
3847 */
3848void lockdep_init_map(struct lockdep_map *lock, const char *name,
3849 struct lock_class_key *key, int subclass)
3850{
3851 int i;
3852
3853 for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
3854 lock->class_cache[i] = NULL;
3855
3856#ifdef CONFIG_LOCK_STAT
3857 lock->cpu = raw_smp_processor_id();
3858#endif
3859
3860 /*
3861 * Can't be having no nameless bastards around this place!
3862 */
3863 if (DEBUG_LOCKS_WARN_ON(!name)) {
3864 lock->name = "NULL";
3865 return;
3866 }
3867
3868 lock->name = name;
3869
3870 /*
3871 * No key, no joy, we need to hash something.
3872 */
3873 if (DEBUG_LOCKS_WARN_ON(!key))
3874 return;
3875 /*
3876 * Sanity check, the lock-class key must either have been allocated
3877 * statically or must have been registered as a dynamic key.
3878 */
3879 if (!static_obj(key) && !is_dynamic_key(key)) {
3880 if (debug_locks)
3881 printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
3882 DEBUG_LOCKS_WARN_ON(1);
3883 return;
3884 }
3885 lock->key = key;
3886
3887 if (unlikely(!debug_locks))
3888 return;
3889
3890 if (subclass) {
3891 unsigned long flags;
3892
3893 if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3894 return;
3895
3896 raw_local_irq_save(flags);
3897 current->lockdep_recursion++;
3898 register_lock_class(lock, subclass, 1);
3899 lockdep_recursion_finish();
3900 raw_local_irq_restore(flags);
3901 }
3902}
3903EXPORT_SYMBOL_GPL(lockdep_init_map);
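/*
 * Typical use from driver code, for illustration only (all names below are
 * made up):
 *
 *	static struct lock_class_key foo_lock_key;
 *
 *	spin_lock_init(&foo->lock);
 *	lockdep_set_class(&foo->lock, &foo_lock_key);
 *
 * lockdep_set_class() and the other annotation helpers end up in
 * lockdep_init_map() with a caller-supplied key, giving the lock instance
 * its own lock class.
 */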
3904
3905struct lock_class_key __lockdep_no_validate__;
3906EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
3907
3908static void
3909print_lock_nested_lock_not_held(struct task_struct *curr,
3910 struct held_lock *hlock,
3911 unsigned long ip)
3912{
3913 if (!debug_locks_off())
3914 return;
3915 if (debug_locks_silent)
3916 return;
3917
3918 pr_warn("\n");
3919 pr_warn("==================================\n");
3920 pr_warn("WARNING: Nested lock was not taken\n");
3921 print_kernel_ident();
3922 pr_warn("----------------------------------\n");
3923
3924 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
3925 print_lock(hlock);
3926
3927 pr_warn("\nbut this task is not holding:\n");
3928 pr_warn("%s\n", hlock->nest_lock->name);
3929
3932
3933 pr_warn("\nother info that might help us debug this:\n");
3934 lockdep_print_held_locks(curr);
3935
3936 pr_warn("\nstack backtrace:\n");
3937 dump_stack();
3938}
3939
3940static int __lock_is_held(const struct lockdep_map *lock, int read);
3941
3942/*
3943 * This gets called for every mutex_lock*()/spin_lock*() operation.
3944 * We maintain the dependency maps and validate the locking attempt:
3945 *
3946 * The callers must make sure that IRQs are disabled before calling it,
3947 * otherwise we could get an interrupt which would want to take locks,
3948 * which would end up in lockdep again.
3949 */
3950static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3951 int trylock, int read, int check, int hardirqs_off,
3952 struct lockdep_map *nest_lock, unsigned long ip,
3953 int references, int pin_count)
3954{
3955 struct task_struct *curr = current;
3956 struct lock_class *class = NULL;
3957 struct held_lock *hlock;
3958 unsigned int depth;
3959 int chain_head = 0;
3960 int class_idx;
3961 u64 chain_key;
3962
3963 if (unlikely(!debug_locks))
3964 return 0;
3965
3966 if (!prove_locking || lock->key == &__lockdep_no_validate__)
3967 check = 0;
3968
3969 if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3970 class = lock->class_cache[subclass];
3971 /*
3972 * Not cached?
3973 */
3974 if (unlikely(!class)) {
3975 class = register_lock_class(lock, subclass, 0);
3976 if (!class)
3977 return 0;
3978 }
3979
3980 debug_class_ops_inc(class);
3981
3982 if (very_verbose(class)) {
3983 printk("\nacquire class [%px] %s", class->key, class->name);
3984 if (class->name_version > 1)
3985 printk(KERN_CONT "#%d", class->name_version);
3986 printk(KERN_CONT "\n");
3987 dump_stack();
3988 }
3989
3990 /*
3991 * Add the lock to the list of currently held locks.
3992 * (we dont increase the depth just yet, up until the
3993 * dependency checks are done)
3994 */
3995 depth = curr->lockdep_depth;
3996 /*
3997 * Ran out of static storage for our per-task lock stack again have we?
3998 */
3999 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
4000 return 0;
4001
4002 class_idx = class - lock_classes;
4003
4004 if (depth) {
4005 hlock = curr->held_locks + depth - 1;
4006 if (hlock->class_idx == class_idx && nest_lock) {
4007 if (!references)
4008 references++;
4009
4010 if (!hlock->references)
4011 hlock->references++;
4012
4013 hlock->references += references;
4014
4015 /* Overflow */
4016 if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
4017 return 0;
4018
4019 return 2;
4020 }
4021 }
4022
4023 hlock = curr->held_locks + depth;
4024 /*
4025 * Plain impossible, we just registered it and checked it weren't no
4026 * NULL like.. I bet this mushroom I ate was good!
4027 */
4028 if (DEBUG_LOCKS_WARN_ON(!class))
4029 return 0;
4030 hlock->class_idx = class_idx;
4031 hlock->acquire_ip = ip;
4032 hlock->instance = lock;
4033 hlock->nest_lock = nest_lock;
4034 hlock->irq_context = task_irq_context(curr);
4035 hlock->trylock = trylock;
4036 hlock->read = read;
4037 hlock->check = check;
4038 hlock->hardirqs_off = !!hardirqs_off;
4039 hlock->references = references;
4040#ifdef CONFIG_LOCK_STAT
4041 hlock->waittime_stamp = 0;
4042 hlock->holdtime_stamp = lockstat_clock();
4043#endif
4044 hlock->pin_count = pin_count;
4045
4046 /* Initialize the lock usage bit */
4047 if (!mark_usage(curr, hlock, check))
4048 return 0;
4049
4050 /*
4051 * Calculate the chain hash: it's the combined hash of all the
4052 * lock keys along the dependency chain. We save the hash value
4053 * at every step so that we can get the current hash easily
4054 * after unlock. The chain hash is then used to cache dependency
4055 * results.
4056 *
4057	 * The most compact value we can use to drive the hash is the class
4058	 * index (the 'key ID'), not class->key itself.
4059 */
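	/*
	 * Worked sketch of the above (illustrative only): for a task taking
	 * classes A and then B in the same irq context,
	 *
	 *	chain_key at depth 0     = INITIAL_CHAIN_KEY
	 *	hlock(A)->prev_chain_key = INITIAL_CHAIN_KEY
	 *	chain_key after A        = iterate_chain_key(INITIAL_CHAIN_KEY, A_idx)
	 *	hlock(B)->prev_chain_key = chain_key after A
	 *	chain_key after B        = iterate_chain_key(chain_key after A, B_idx)
	 *
	 * "chain_key after B" is what validate_chain() looks up in the chain
	 * hash and what ends up in curr->curr_chain_key below.
	 */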
4060 /*
4061 * Whoops, we did it again.. class_idx is invalid.
4062 */
4063 if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lock_classes_in_use)))
4064 return 0;
4065
4066 chain_key = curr->curr_chain_key;
4067 if (!depth) {
4068 /*
4069 * How can we have a chain hash when we ain't got no keys?!
4070 */
4071 if (DEBUG_LOCKS_WARN_ON(chain_key != INITIAL_CHAIN_KEY))
4072 return 0;
4073 chain_head = 1;
4074 }
4075
4076 hlock->prev_chain_key = chain_key;
4077 if (separate_irq_context(curr, hlock)) {
4078 chain_key = INITIAL_CHAIN_KEY;
4079 chain_head = 1;
4080 }
4081 chain_key = iterate_chain_key(chain_key, class_idx);
4082
4083 if (nest_lock && !__lock_is_held(nest_lock, -1)) {
4084 print_lock_nested_lock_not_held(curr, hlock, ip);
4085 return 0;
4086 }
4087
4088 if (!debug_locks_silent) {
4089 WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
4090 WARN_ON_ONCE(!hlock_class(hlock)->key);
4091 }
4092
4093 if (!validate_chain(curr, hlock, chain_head, chain_key))
4094 return 0;
4095
4096 curr->curr_chain_key = chain_key;
4097 curr->lockdep_depth++;
4098 check_chain_key(curr);
4099#ifdef CONFIG_DEBUG_LOCKDEP
4100 if (unlikely(!debug_locks))
4101 return 0;
4102#endif
4103 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
4104 debug_locks_off();
4105 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
4106 printk(KERN_DEBUG "depth: %i max: %lu!\n",
4107 curr->lockdep_depth, MAX_LOCK_DEPTH);
4108
4109 lockdep_print_held_locks(current);
4110 debug_show_all_locks();
4111 dump_stack();
4112
4113 return 0;
4114 }
4115
4116 if (unlikely(curr->lockdep_depth > max_lockdep_depth))
4117 max_lockdep_depth = curr->lockdep_depth;
4118
4119 return 1;
4120}
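
/*
 * Illustrative sketch (not part of this file): the nest_lock/references
 * handling in __lock_acquire() is what makes annotations of the following
 * shape work, where one outer lock serializes taking many locks of the
 * same class.  The structures and helper are hypothetical; callers usually
 * go through wrappers in the mutex_lock_nest_lock() style:
 *
 *	struct my_graph {
 *		struct mutex	big_lock;	-- serializes whole-graph ops
 *	};
 *	struct my_node {
 *		struct mutex	lock;		-- all nodes share one class
 *	};
 *
 *	static void my_graph_lock_node(struct my_graph *g, struct my_node *n)
 *	{
 *		lockdep_assert_held(&g->big_lock);
 *		mutex_lock_nest_lock(&n->lock, &g->big_lock);
 *	}
 *
 * Repeat acquisitions of the node class are then merged into a single
 * held_lock entry via ->references instead of being reported as a
 * deadlock, and print_lock_nested_lock_not_held() fires if big_lock is
 * not actually held.
 */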
4121
4122static void print_unlock_imbalance_bug(struct task_struct *curr,
4123 struct lockdep_map *lock,
4124 unsigned long ip)
4125{
4126 if (!debug_locks_off())
4127 return;
4128 if (debug_locks_silent)
4129 return;
4130
4131 pr_warn("\n");
4132 pr_warn("=====================================\n");
4133 pr_warn("WARNING: bad unlock balance detected!\n");
4134 print_kernel_ident();
4135 pr_warn("-------------------------------------\n");
4136 pr_warn("%s/%d is trying to release lock (",
4137 curr->comm, task_pid_nr(curr));
4138 print_lockdep_cache(lock);
4139 pr_cont(") at:\n");
4140 print_ip_sym(ip);
4141 pr_warn("but there are no more locks to release!\n");
4142 pr_warn("\nother info that might help us debug this:\n");
4143 lockdep_print_held_locks(curr);
4144
4145 pr_warn("\nstack backtrace:\n");
4146 dump_stack();
4147}
4148
4149static int match_held_lock(const struct held_lock *hlock,
4150 const struct lockdep_map *lock)
4151{
4152 if (hlock->instance == lock)
4153 return 1;
4154
4155 if (hlock->references) {
4156 const struct lock_class *class = lock->class_cache[0];
4157
4158 if (!class)
4159 class = look_up_lock_class(lock, 0);
4160
4161 /*
4162 * If look_up_lock_class() failed to find a class, we're trying
4163 * to test if we hold a lock that has never yet been acquired.
4164 * Clearly if the lock hasn't been acquired _ever_, we're not
4165 * holding it either, so report failure.
4166 */
4167 if (!class)
4168 return 0;
4169
4170 /*
4171 * References, but not a lock we're actually ref-counting?
4172 * State got messed up, follow the sites that change ->references
4173 * and try to make sense of it.
4174 */
4175 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
4176 return 0;
4177
4178 if (hlock->class_idx == class - lock_classes)
4179 return 1;
4180 }
4181
4182 return 0;
4183}
4184
4185/* @depth must not be zero */
4186static struct held_lock *find_held_lock(struct task_struct *curr,
4187 struct lockdep_map *lock,
4188 unsigned int depth, int *idx)
4189{
4190 struct held_lock *ret, *hlock, *prev_hlock;
4191 int i;
4192
4193 i = depth - 1;
4194 hlock = curr->held_locks + i;
4195 ret = hlock;
4196 if (match_held_lock(hlock, lock))
4197 goto out;
4198
4199 ret = NULL;
4200 for (i--, prev_hlock = hlock--;
4201 i >= 0;
4202 i--, prev_hlock = hlock--) {
4203 /*
4204 * We must not cross into another context:
4205 */
4206 if (prev_hlock->irq_context != hlock->irq_context) {
4207 ret = NULL;
4208 break;
4209 }
4210 if (match_held_lock(hlock, lock)) {
4211 ret = hlock;
4212 break;
4213 }
4214 }
4215
4216out:
4217 *idx = i;
4218 return ret;
4219}
4220
4221static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
4222 int idx, unsigned int *merged)
4223{
4224 struct held_lock *hlock;
4225 int first_idx = idx;
4226
4227 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
4228 return 0;
4229
4230 for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
4231 switch (__lock_acquire(hlock->instance,
4232 hlock_class(hlock)->subclass,
4233 hlock->trylock,
4234 hlock->read, hlock->check,
4235 hlock->hardirqs_off,
4236 hlock->nest_lock, hlock->acquire_ip,
4237 hlock->references, hlock->pin_count)) {
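		/*
		 * 0: __lock_acquire() failed, make the caller bail out (1).
		 * 1: reacquired normally.
		 * 2: merged into the previous held_lock via ->references;
		 *    only counted when it is the first entry we replay.
		 */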
4238 case 0:
4239 return 1;
4240 case 1:
4241 break;
4242 case 2:
4243 *merged += (idx == first_idx);
4244 break;
4245 default:
4246 WARN_ON(1);
4247 return 0;
4248 }
4249 }
4250 return 0;
4251}
4252
4253static int
4254__lock_set_class(struct lockdep_map *lock, const char *name,
4255 struct lock_class_key *key, unsigned int subclass,
4256 unsigned long ip)
4257{
4258 struct task_struct *curr = current;
4259 unsigned int depth, merged = 0;
4260 struct held_lock *hlock;
4261 struct lock_class *class;
4262 int i;
4263
4264 if (unlikely(!debug_locks))
4265 return 0;
4266
4267 depth = curr->lockdep_depth;
4268 /*
4269 * This function is about (re)setting the class of a held lock,
4270 * yet we're not actually holding any locks. Naughty user!
4271 */
4272 if (DEBUG_LOCKS_WARN_ON(!depth))
4273 return 0;
4274
4275 hlock = find_held_lock(curr, lock, depth, &i);
4276 if (!hlock) {
4277 print_unlock_imbalance_bug(curr, lock, ip);
4278 return 0;
4279 }
4280
4281 lockdep_init_map(lock, name, key, 0);
4282 class = register_lock_class(lock, subclass, 0);
4283 hlock->class_idx = class - lock_classes;
4284
4285 curr->lockdep_depth = i;
4286 curr->curr_chain_key = hlock->prev_chain_key;
4287
4288 if (reacquire_held_locks(curr, depth, i, &merged))
4289 return 0;
4290
4291 /*
4292 * I took it apart and put it back together again, except now I have
4293 * these 'spare' parts.. where shall I put them.
4294 */
4295 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged))
4296 return 0;
4297 return 1;
4298}
4299
4300static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
4301{
4302 struct task_struct *curr = current;
4303 unsigned int depth, merged = 0;
4304 struct held_lock *hlock;
4305 int i;
4306
4307 if (unlikely(!debug_locks))
4308 return 0;
4309
4310 depth = curr->lockdep_depth;
4311 /*
4312 * This function is about (re)setting the class of a held lock,
4313 * yet we're not actually holding any locks. Naughty user!
4314 */
4315 if (DEBUG_LOCKS_WARN_ON(!depth))
4316 return 0;
4317
4318 hlock = find_held_lock(curr, lock, depth, &i);
4319 if (!hlock) {
4320 print_unlock_imbalance_bug(curr, lock, ip);
4321 return 0;
4322 }
4323
4324 curr->lockdep_depth = i;
4325 curr->curr_chain_key = hlock->prev_chain_key;
4326
4327 WARN(hlock->read, "downgrading a read lock");
4328 hlock->read = 1;
4329 hlock->acquire_ip = ip;
4330
4331 if (reacquire_held_locks(curr, depth, i, &merged))
4332 return 0;
4333
4334 /* Merging can't happen with unchanged classes.. */
4335 if (DEBUG_LOCKS_WARN_ON(merged))
4336 return 0;
4337
4338 /*
4339 * I took it apart and put it back together again, except now I have
4340 * these 'spare' parts.. where shall I put them.
4341 */
4342 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
4343 return 0;
4344
4345 return 1;
4346}
4347
4348/*
4349 * Remove the lock from the list of currently held locks - this gets
4350 * called on mutex_unlock()/spin_unlock*() (or on a failed
4351 * mutex_lock_interruptible()).
4352 *
4353 * @nested is a hysterical artifact and needs a tree-wide cleanup.
4354 */
4355static int
4356__lock_release(struct lockdep_map *lock, unsigned long ip)
4357{
4358 struct task_struct *curr = current;
4359 unsigned int depth, merged = 1;
4360 struct held_lock *hlock;
4361 int i;
4362
4363 if (unlikely(!debug_locks))
4364 return 0;
4365
4366 depth = curr->lockdep_depth;
4367 /*
4368 * So we're all set to release this lock.. wait what lock? We don't
4369 * own any locks, you've been drinking again?
4370 */
4371 if (depth <= 0) {
4372 print_unlock_imbalance_bug(curr, lock, ip);
4373 return 0;
4374 }
4375
4376 /*
4377 * Check whether the lock exists in the current stack
4378 * of held locks:
4379 */
4380 hlock = find_held_lock(curr, lock, depth, &i);
4381 if (!hlock) {
4382 print_unlock_imbalance_bug(curr, lock, ip);
4383 return 0;
4384 }
4385
4386 if (hlock->instance == lock)
4387 lock_release_holdtime(hlock);
4388
4389 WARN(hlock->pin_count, "releasing a pinned lock\n");
4390
4391 if (hlock->references) {
4392 hlock->references--;
4393 if (hlock->references) {
4394 /*
4395 * We had, and after removing one, still have
4396 * references, the current lock stack is still
4397 * valid. We're done!
4398 */
4399 return 1;
4400 }
4401 }
4402
4403 /*
4404 * We have the right lock to unlock, 'hlock' points to it.
4405 * Now we remove it from the stack, and add back the other
4406 * entries (if any), recalculating the hash along the way:
4407 */
4408
4409 curr->lockdep_depth = i;
4410 curr->curr_chain_key = hlock->prev_chain_key;
4411
4412 /*
4413 * The most likely case is when the unlock is on the innermost
4414 * lock. In this case, we are done!
4415 */
4416 if (i == depth-1)
4417 return 1;
4418
4419 if (reacquire_held_locks(curr, depth, i + 1, &merged))
4420 return 0;
4421
4422 /*
4423 * We had N bottles of beer on the wall, we drank one, but now
4424 * there's not N-1 bottles of beer left on the wall...
4425 * Pouring two of the bottles together is acceptable.
4426 */
4427 DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged);
4428
4429 /*
4430 * Since reacquire_held_locks() would have called check_chain_key()
4431 * indirectly via __lock_acquire(), we don't need to do it again
4432 * on return.
4433 */
4434 return 0;
4435}
4436
4437static nokprobe_inline
4438int __lock_is_held(const struct lockdep_map *lock, int read)
4439{
4440 struct task_struct *curr = current;
4441 int i;
4442
4443 for (i = 0; i < curr->lockdep_depth; i++) {
4444 struct held_lock *hlock = curr->held_locks + i;
4445
4446 if (match_held_lock(hlock, lock)) {
4447 if (read == -1 || hlock->read == read)
4448 return 1;
4449
4450 return 0;
4451 }
4452 }
4453
4454 return 0;
4455}
4456
4457static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
4458{
4459 struct pin_cookie cookie = NIL_COOKIE;
4460 struct task_struct *curr = current;
4461 int i;
4462
4463 if (unlikely(!debug_locks))
4464 return cookie;
4465
4466 for (i = 0; i < curr->lockdep_depth; i++) {
4467 struct held_lock *hlock = curr->held_locks + i;
4468
4469 if (match_held_lock(hlock, lock)) {
4470 /*
4471 * Grab 16bits of randomness; this is sufficient to not
4472 * be guessable and still allows some pin nesting in
4473 * our u32 pin_count.
4474 */
4475 cookie.val = 1 + (prandom_u32() >> 16);
4476 hlock->pin_count += cookie.val;
4477 return cookie;
4478 }
4479 }
4480
4481 WARN(1, "pinning an unheld lock\n");
4482 return cookie;
4483}
4484
4485static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4486{
4487 struct task_struct *curr = current;
4488 int i;
4489
4490 if (unlikely(!debug_locks))
4491 return;
4492
4493 for (i = 0; i < curr->lockdep_depth; i++) {
4494 struct held_lock *hlock = curr->held_locks + i;
4495
4496 if (match_held_lock(hlock, lock)) {
4497 hlock->pin_count += cookie.val;
4498 return;
4499 }
4500 }
4501
4502 WARN(1, "pinning an unheld lock\n");
4503}
4504
4505static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4506{
4507 struct task_struct *curr = current;
4508 int i;
4509
4510 if (unlikely(!debug_locks))
4511 return;
4512
4513 for (i = 0; i < curr->lockdep_depth; i++) {
4514 struct held_lock *hlock = curr->held_locks + i;
4515
4516 if (match_held_lock(hlock, lock)) {
4517 if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
4518 return;
4519
4520 hlock->pin_count -= cookie.val;
4521
4522 if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
4523 hlock->pin_count = 0;
4524
4525 return;
4526 }
4527 }
4528
4529 WARN(1, "unpinning an unheld lock\n");
4530}
4531
4532/*
4533 * Check whether we follow the irq-flags state precisely:
4534 */
4535static void check_flags(unsigned long flags)
4536{
4537#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
4538 if (!debug_locks)
4539 return;
4540
4541 if (irqs_disabled_flags(flags)) {
4542 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
4543 printk("possible reason: unannotated irqs-off.\n");
4544 }
4545 } else {
4546 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
4547 printk("possible reason: unannotated irqs-on.\n");
4548 }
4549 }
4550
4551 /*
4552 * We dont accurately track softirq state in e.g.
4553 * hardirq contexts (such as on 4KSTACKS), so only
4554 * check if not in hardirq contexts:
4555 */
4556 if (!hardirq_count()) {
4557 if (softirq_count()) {
4558 /* like the above, but with softirqs */
4559 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
4560 } else {
4561 /* lick the above, does it taste good? */
4562 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
4563 }
4564 }
4565
4566 if (!debug_locks)
4567 print_irqtrace_events(current);
4568#endif
4569}
4570
4571void lock_set_class(struct lockdep_map *lock, const char *name,
4572 struct lock_class_key *key, unsigned int subclass,
4573 unsigned long ip)
4574{
4575 unsigned long flags;
4576
4577 if (unlikely(current->lockdep_recursion))
4578 return;
4579
4580 raw_local_irq_save(flags);
4581 current->lockdep_recursion++;
4582 check_flags(flags);
4583 if (__lock_set_class(lock, name, key, subclass, ip))
4584 check_chain_key(current);
4585 lockdep_recursion_finish();
4586 raw_local_irq_restore(flags);
4587}
4588EXPORT_SYMBOL_GPL(lock_set_class);
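
/*
 * Illustrative sketch (not part of this file): lock_set_class() lets a
 * caller re-annotate a lock it is currently holding under a different
 * name/key (lock_set_subclass() is a thin wrapper around it).  The names
 * below are hypothetical:
 *
 *	-- while holding obj->lock, move it into the class keyed by new_key,
 *	-- e.g. after the object has been re-parented:
 *	lock_set_class(&obj->lock.dep_map, "my_obj_lock", &new_key, 0,
 *		       _RET_IP_);
 */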
4589
4590void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
4591{
4592 unsigned long flags;
4593
4594 if (unlikely(current->lockdep_recursion))
4595 return;
4596
4597 raw_local_irq_save(flags);
4598 current->lockdep_recursion++;
4599 check_flags(flags);
4600 if (__lock_downgrade(lock, ip))
4601 check_chain_key(current);
4602 lockdep_recursion_finish();
4603 raw_local_irq_restore(flags);
4604}
4605EXPORT_SYMBOL_GPL(lock_downgrade);
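
/*
 * Illustrative sketch (not part of this file): a write-to-read downgrade
 * path of an rwsem-style primitive pairs the real downgrade with this
 * annotation so the held_lock is revalidated as a read lock.  The type and
 * the do_the_real_downgrade() helper are hypothetical:
 *
 *	static void my_downgrade_write(struct my_rwsem *sem)
 *	{
 *		lock_downgrade(&sem->dep_map, _RET_IP_);
 *		do_the_real_downgrade(sem);
 *	}
 */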
4606
4607/*
4608 * We are not always called with irqs disabled - do that here,
4609 * and also avoid lockdep recursion:
4610 */
4611void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
4612 int trylock, int read, int check,
4613 struct lockdep_map *nest_lock, unsigned long ip)
4614{
4615 unsigned long flags;
4616
4617 if (unlikely(current->lockdep_recursion))
4618 return;
4619
4620 raw_local_irq_save(flags);
4621 check_flags(flags);
4622
4623 current->lockdep_recursion++;
4624 trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
4625 __lock_acquire(lock, subclass, trylock, read, check,
4626 irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
4627 lockdep_recursion_finish();
4628 raw_local_irq_restore(flags);
4629}
4630EXPORT_SYMBOL_GPL(lock_acquire);
4631
4632void lock_release(struct lockdep_map *lock, int nested,
4633 unsigned long ip)
4634{
4635 unsigned long flags;
4636
4637 if (unlikely(current->lockdep_recursion))
4638 return;
4639
4640 raw_local_irq_save(flags);
4641 check_flags(flags);
4642 current->lockdep_recursion++;
4643 trace_lock_release(lock, ip);
4644 if (__lock_release(lock, ip))
4645 check_chain_key(current);
4646 lockdep_recursion_finish();
4647 raw_local_irq_restore(flags);
4648}
4649EXPORT_SYMBOL_GPL(lock_release);
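
/*
 * Illustrative sketch (not part of this file): a custom locking primitive
 * annotates itself with the two entry points above, using the signatures
 * from this file.  The my_lock type and the do_the_real_*() helpers are
 * hypothetical:
 *
 *	static void my_lock(struct my_lock *l)
 *	{
 *		-- subclass=0, trylock=0, read=0, check=1, no nest_lock:
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		do_the_real_locking(l);
 *	}
 *
 *	static void my_unlock(struct my_lock *l)
 *	{
 *		lock_release(&l->dep_map, 0, _RET_IP_);
 *		do_the_real_unlocking(l);
 *	}
 *
 * Annotating before blocking on the real lock is what lets lockdep report
 * a deadlock instead of silently hanging in one.
 */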
4650
4651int lock_is_held_type(const struct lockdep_map *lock, int read)
4652{
4653 unsigned long flags;
4654 int ret = 0;
4655
4656 if (unlikely(current->lockdep_recursion))
4657 return 1; /* avoid false negative lockdep_assert_held() */
4658
4659 raw_local_irq_save(flags);
4660 check_flags(flags);
4661
4662 current->lockdep_recursion++;
4663 ret = __lock_is_held(lock, read);
4664 lockdep_recursion_finish();
4665 raw_local_irq_restore(flags);
4666
4667 return ret;
4668}
4669EXPORT_SYMBOL_GPL(lock_is_held_type);
4670NOKPROBE_SYMBOL(lock_is_held_type);
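
/*
 * Illustrative sketch (not part of this file): lock_is_held_type() backs
 * the lockdep_is_held()/lockdep_assert_held() family, which callers use to
 * document and enforce locking requirements.  The my_dev type is
 * hypothetical:
 *
 *	static void my_dev_update_state(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		dev->state++;
 *	}
 *
 * The early "return 1" above while lockdep is recursing exists so that
 * such assertions don't produce false warnings from inside lockdep itself.
 */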
4671
4672struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
4673{
4674 struct pin_cookie cookie = NIL_COOKIE;
4675 unsigned long flags;
4676
4677 if (unlikely(current->lockdep_recursion))
4678 return cookie;
4679
4680 raw_local_irq_save(flags);
4681 check_flags(flags);
4682
4683 current->lockdep_recursion++;
4684 cookie = __lock_pin_lock(lock);
4685 lockdep_recursion_finish();
4686 raw_local_irq_restore(flags);
4687
4688 return cookie;
4689}
4690EXPORT_SYMBOL_GPL(lock_pin_lock);
4691
4692void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4693{
4694 unsigned long flags;
4695
4696 if (unlikely(current->lockdep_recursion))
4697 return;
4698
4699 raw_local_irq_save(flags);
4700 check_flags(flags);
4701
4702 current->lockdep_recursion++;
4703 __lock_repin_lock(lock, cookie);
4704 lockdep_recursion_finish();
4705 raw_local_irq_restore(flags);
4706}
4707EXPORT_SYMBOL_GPL(lock_repin_lock);
4708
4709void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4710{
4711 unsigned long flags;
4712
4713 if (unlikely(current->lockdep_recursion))
4714 return;
4715
4716 raw_local_irq_save(flags);
4717 check_flags(flags);
4718
4719 current->lockdep_recursion++;
4720 __lock_unpin_lock(lock, cookie);
4721 lockdep_recursion_finish();
4722 raw_local_irq_restore(flags);
4723}
4724EXPORT_SYMBOL_GPL(lock_unpin_lock);
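
/*
 * Illustrative sketch (not part of this file): the pin/unpin API lets code
 * that hands a held lock to other code detect an unexpected unlock/relock
 * in between (the scheduler uses this for the runqueue lock).  The cookie
 * must make the round trip; the my_lock instance 'l' is hypothetical:
 *
 *	struct pin_cookie cookie;
 *
 *	cookie = lock_pin_lock(&l->dep_map);
 *	call_into_other_code(l);		-- must not drop 'l'
 *	lock_unpin_lock(&l->dep_map, cookie);	-- splats if the pin was lost
 */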
4725
4726#ifdef CONFIG_LOCK_STAT
4727static void print_lock_contention_bug(struct task_struct *curr,
4728 struct lockdep_map *lock,
4729 unsigned long ip)
4730{
4731 if (!debug_locks_off())
4732 return;
4733 if (debug_locks_silent)
4734 return;
4735
4736 pr_warn("\n");
4737 pr_warn("=================================\n");
4738 pr_warn("WARNING: bad contention detected!\n");
4739 print_kernel_ident();
4740 pr_warn("---------------------------------\n");
4741 pr_warn("%s/%d is trying to contend lock (",
4742 curr->comm, task_pid_nr(curr));
4743 print_lockdep_cache(lock);
4744 pr_cont(") at:\n");
4745 print_ip_sym(ip);
4746 pr_warn("but there are no locks held!\n");
4747 pr_warn("\nother info that might help us debug this:\n");
4748 lockdep_print_held_locks(curr);
4749
4750 pr_warn("\nstack backtrace:\n");
4751 dump_stack();
4752}
4753
4754static void
4755__lock_contended(struct lockdep_map *lock, unsigned long ip)
4756{
4757 struct task_struct *curr = current;
4758 struct held_lock *hlock;
4759 struct lock_class_stats *stats;
4760 unsigned int depth;
4761 int i, contention_point, contending_point;
4762
4763 depth = curr->lockdep_depth;
4764 /*
4765 * Whee, we contended on this lock, except it seems we're not
4766 * actually trying to acquire anything much at all..
4767 */
4768 if (DEBUG_LOCKS_WARN_ON(!depth))
4769 return;
4770
4771 hlock = find_held_lock(curr, lock, depth, &i);
4772 if (!hlock) {
4773 print_lock_contention_bug(curr, lock, ip);
4774 return;
4775 }
4776
4777 if (hlock->instance != lock)
4778 return;
4779
4780 hlock->waittime_stamp = lockstat_clock();
4781
4782 contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
4783 contending_point = lock_point(hlock_class(hlock)->contending_point,
4784 lock->ip);
4785
4786 stats = get_lock_stats(hlock_class(hlock));
4787 if (contention_point < LOCKSTAT_POINTS)
4788 stats->contention_point[contention_point]++;
4789 if (contending_point < LOCKSTAT_POINTS)
4790 stats->contending_point[contending_point]++;
4791 if (lock->cpu != smp_processor_id())
4792 stats->bounces[bounce_contended + !!hlock->read]++;
4793}
4794
4795static void
4796__lock_acquired(struct lockdep_map *lock, unsigned long ip)
4797{
4798 struct task_struct *curr = current;
4799 struct held_lock *hlock;
4800 struct lock_class_stats *stats;
4801 unsigned int depth;
4802 u64 now, waittime = 0;
4803 int i, cpu;
4804
4805 depth = curr->lockdep_depth;
4806 /*
4807 * Yay, we acquired ownership of this lock we didn't try to
4808 * acquire, how the heck did that happen?
4809 */
4810 if (DEBUG_LOCKS_WARN_ON(!depth))
4811 return;
4812
4813 hlock = find_held_lock(curr, lock, depth, &i);
4814 if (!hlock) {
4815 print_lock_contention_bug(curr, lock, _RET_IP_);
4816 return;
4817 }
4818
4819 if (hlock->instance != lock)
4820 return;
4821
4822 cpu = smp_processor_id();
4823 if (hlock->waittime_stamp) {
4824 now = lockstat_clock();
4825 waittime = now - hlock->waittime_stamp;
4826 hlock->holdtime_stamp = now;
4827 }
4828
4829 trace_lock_acquired(lock, ip);
4830
4831 stats = get_lock_stats(hlock_class(hlock));
4832 if (waittime) {
4833 if (hlock->read)
4834 lock_time_inc(&stats->read_waittime, waittime);
4835 else
4836 lock_time_inc(&stats->write_waittime, waittime);
4837 }
4838 if (lock->cpu != cpu)
4839 stats->bounces[bounce_acquired + !!hlock->read]++;
4840
4841 lock->cpu = cpu;
4842 lock->ip = ip;
4843}
4844
4845void lock_contended(struct lockdep_map *lock, unsigned long ip)
4846{
4847 unsigned long flags;
4848
4849 if (unlikely(!lock_stat || !debug_locks))
4850 return;
4851
4852 if (unlikely(current->lockdep_recursion))
4853 return;
4854
4855 raw_local_irq_save(flags);
4856 check_flags(flags);
4857 current->lockdep_recursion++;
4858 trace_lock_contended(lock, ip);
4859 __lock_contended(lock, ip);
4860 lockdep_recursion_finish();
4861 raw_local_irq_restore(flags);
4862}
4863EXPORT_SYMBOL_GPL(lock_contended);
4864
4865void lock_acquired(struct lockdep_map *lock, unsigned long ip)
4866{
4867 unsigned long flags;
4868
4869 if (unlikely(!lock_stat || !debug_locks))
4870 return;
4871
4872 if (unlikely(current->lockdep_recursion))
4873 return;
4874
4875 raw_local_irq_save(flags);
4876 check_flags(flags);
4877 current->lockdep_recursion++;
4878 __lock_acquired(lock, ip);
4879 lockdep_recursion_finish();
4880 raw_local_irq_restore(flags);
4881}
4882EXPORT_SYMBOL_GPL(lock_acquired);
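
/*
 * Illustrative sketch (not part of this file): a contended slowpath
 * reports the contention before blocking and the eventual acquisition
 * afterwards; this is the pattern the LOCK_CONTENDED() helper wraps:
 *
 *	if (!do_try_lock(l)) {
 *		lock_contended(&l->dep_map, _RET_IP_);
 *		do_blocking_lock(l);
 *	}
 *	lock_acquired(&l->dep_map, _RET_IP_);
 *
 * The time between the two calls feeds the read/write waittime and bounce
 * statistics maintained above.
 */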
4883#endif
4884
4885/*
4886 * Used by the testsuite, sanitize the validator state
4887 * after a simulated failure:
4888 */
4889
4890void lockdep_reset(void)
4891{
4892 unsigned long flags;
4893 int i;
4894
4895 raw_local_irq_save(flags);
4896 lockdep_init_task(current);
4897 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
4898 nr_hardirq_chains = 0;
4899 nr_softirq_chains = 0;
4900 nr_process_chains = 0;
4901 debug_locks = 1;
4902 for (i = 0; i < CHAINHASH_SIZE; i++)
4903 INIT_HLIST_HEAD(chainhash_table + i);
4904 raw_local_irq_restore(flags);
4905}
4906
4907/* Remove a class from a lock chain. Must be called with the graph lock held. */
4908static void remove_class_from_lock_chain(struct pending_free *pf,
4909 struct lock_chain *chain,
4910 struct lock_class *class)
4911{
4912#ifdef CONFIG_PROVE_LOCKING
4913 struct lock_chain *new_chain;
4914 u64 chain_key;
4915 int i;
4916
4917 for (i = chain->base; i < chain->base + chain->depth; i++) {
4918 if (chain_hlocks[i] != class - lock_classes)
4919 continue;
4920 /* The code below leaks one chain_hlock[] entry. */
4921 if (--chain->depth > 0) {
4922 memmove(&chain_hlocks[i], &chain_hlocks[i + 1],
4923 (chain->base + chain->depth - i) *
4924 sizeof(chain_hlocks[0]));
4925 }
4926 /*
4927 * Each lock class occurs at most once in a lock chain so once
4928 * we have found a match we can break out of this loop.
4929 */
4930 goto recalc;
4931 }
4932 /* Since the chain has not been modified, return. */
4933 return;
4934
4935recalc:
4936 chain_key = INITIAL_CHAIN_KEY;
4937 for (i = chain->base; i < chain->base + chain->depth; i++)
4938 chain_key = iterate_chain_key(chain_key, chain_hlocks[i]);
4939 if (chain->depth && chain->chain_key == chain_key)
4940 return;
4941 /* Overwrite the chain key for concurrent RCU readers. */
4942 WRITE_ONCE(chain->chain_key, chain_key);
4943 dec_chains(chain->irq_context);
4944
4945 /*
4946 * Note: calling hlist_del_rcu() from inside a
4947 * hlist_for_each_entry_rcu() loop is safe.
4948 */
4949 hlist_del_rcu(&chain->entry);
4950 __set_bit(chain - lock_chains, pf->lock_chains_being_freed);
4951 if (chain->depth == 0)
4952 return;
4953 /*
4954 * If the modified lock chain matches an existing lock chain, drop
4955 * the modified lock chain.
4956 */
4957 if (lookup_chain_cache(chain_key))
4958 return;
4959 new_chain = alloc_lock_chain();
4960 if (WARN_ON_ONCE(!new_chain)) {
4961 debug_locks_off();
4962 return;
4963 }
4964 *new_chain = *chain;
4965 hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key));
4966 inc_chains(new_chain->irq_context);
4967#endif
4968}
4969
4970/* Must be called with the graph lock held. */
4971static void remove_class_from_lock_chains(struct pending_free *pf,
4972 struct lock_class *class)
4973{
4974 struct lock_chain *chain;
4975 struct hlist_head *head;
4976 int i;
4977
4978 for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
4979 head = chainhash_table + i;
4980 hlist_for_each_entry_rcu(chain, head, entry) {
4981 remove_class_from_lock_chain(pf, chain, class);
4982 }
4983 }
4984}
4985
4986/*
4987 * Remove all references to a lock class. The caller must hold the graph lock.
4988 */
4989static void zap_class(struct pending_free *pf, struct lock_class *class)
4990{
4991 struct lock_list *entry;
4992 int i;
4993
4994 WARN_ON_ONCE(!class->key);
4995
4996 /*
4997 * Remove all dependencies this lock is
4998 * involved in:
4999 */
5000 for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
5001 entry = list_entries + i;
5002 if (entry->class != class && entry->links_to != class)
5003 continue;
5004 __clear_bit(i, list_entries_in_use);
5005 nr_list_entries--;
5006 list_del_rcu(&entry->entry);
5007 }
5008 if (list_empty(&class->locks_after) &&
5009 list_empty(&class->locks_before)) {
5010 list_move_tail(&class->lock_entry, &pf->zapped);
5011 hlist_del_rcu(&class->hash_entry);
5012 WRITE_ONCE(class->key, NULL);
5013 WRITE_ONCE(class->name, NULL);
5014 nr_lock_classes--;
5015 __clear_bit(class - lock_classes, lock_classes_in_use);
5016 } else {
5017 WARN_ONCE(true, "%s() failed for class %s\n", __func__,
5018 class->name);
5019 }
5020
5021 remove_class_from_lock_chains(pf, class);
5022}
5023
5024static void reinit_class(struct lock_class *class)
5025{
5026 void *const p = class;
5027 const unsigned int offset = offsetof(struct lock_class, key);
5028
5029 WARN_ON_ONCE(!class->lock_entry.next);
5030 WARN_ON_ONCE(!list_empty(&class->locks_after));
5031 WARN_ON_ONCE(!list_empty(&class->locks_before));
5032 memset(p + offset, 0, sizeof(*class) - offset);
5033 WARN_ON_ONCE(!class->lock_entry.next);
5034 WARN_ON_ONCE(!list_empty(&class->locks_after));
5035 WARN_ON_ONCE(!list_empty(&class->locks_before));
5036}
5037
5038static inline int within(const void *addr, void *start, unsigned long size)
5039{
5040 return addr >= start && addr < start + size;
5041}
5042
5043static bool inside_selftest(void)
5044{
5045 return current == lockdep_selftest_task_struct;
5046}
5047
5048/* The caller must hold the graph lock. */
5049static struct pending_free *get_pending_free(void)
5050{
5051 return delayed_free.pf + delayed_free.index;
5052}
5053
5054static void free_zapped_rcu(struct rcu_head *cb);
5055
5056/*
5057 * See if we need to queue an RCU callback; must be called with the
5058 * lockdep lock held.  Returns false if we either don't have any pending
5059 * free or the callback is already scheduled.  Otherwise, a call_rcu()
5060 * must follow this function call.
5061 */
5062static bool prepare_call_rcu_zapped(struct pending_free *pf)
5063{
5064 WARN_ON_ONCE(inside_selftest());
5065
5066 if (list_empty(&pf->zapped))
5067 return false;
5068
5069 if (delayed_free.scheduled)
5070 return false;
5071
5072 delayed_free.scheduled = true;
5073
5074 WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
5075 delayed_free.index ^= 1;
5076
5077 return true;
5078}
5079
5080/* The caller must hold the graph lock. May be called from RCU context. */
5081static void __free_zapped_classes(struct pending_free *pf)
5082{
5083 struct lock_class *class;
5084
5085 check_data_structures();
5086
5087 list_for_each_entry(class, &pf->zapped, lock_entry)
5088 reinit_class(class);
5089
5090 list_splice_init(&pf->zapped, &free_lock_classes);
5091
5092#ifdef CONFIG_PROVE_LOCKING
5093 bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
5094 pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
5095 bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
5096#endif
5097}
5098
5099static void free_zapped_rcu(struct rcu_head *ch)
5100{
5101 struct pending_free *pf;
5102 unsigned long flags;
5103 bool need_callback;
5104
5105 if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
5106 return;
5107
5108 raw_local_irq_save(flags);
5109 lockdep_lock();
5110
5111 /* closed head */
5112 pf = delayed_free.pf + (delayed_free.index ^ 1);
5113 __free_zapped_classes(pf);
5114 delayed_free.scheduled = false;
5115 need_callback =
5116 prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
5117 lockdep_unlock();
5118 raw_local_irq_restore(flags);
5119
5120 /*
5121	 * If there's a pending free and its callback has not been scheduled,
5122 * queue an RCU callback.
5123 */
5124 if (need_callback)
5125 call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
5126
5127}
5128
5129/*
5130 * Remove all lock classes from the class hash table and from the
5131 * all_lock_classes list whose key or name is in the address range [start,
5132 * start + size). Move these lock classes to the zapped_classes list. Must
5133 * be called with the graph lock held.
5134 */
5135static void __lockdep_free_key_range(struct pending_free *pf, void *start,
5136 unsigned long size)
5137{
5138 struct lock_class *class;
5139 struct hlist_head *head;
5140 int i;
5141
5142 /* Unhash all classes that were created by a module. */
5143 for (i = 0; i < CLASSHASH_SIZE; i++) {
5144 head = classhash_table + i;
5145 hlist_for_each_entry_rcu(class, head, hash_entry) {
5146 if (!within(class->key, start, size) &&
5147 !within(class->name, start, size))
5148 continue;
5149 zap_class(pf, class);
5150 }
5151 }
5152}
5153
5154/*
5155 * Used in module.c to remove lock classes from memory that is going to be
5156 * freed; and possibly re-used by other modules.
5157 *
5158 * We will have had one synchronize_rcu() before getting here, so we're
5159 * guaranteed nobody will look up these exact classes -- they're properly dead
5160 * but still allocated.
5161 */
5162static void lockdep_free_key_range_reg(void *start, unsigned long size)
5163{
5164 struct pending_free *pf;
5165 unsigned long flags;
5166 bool need_callback;
5167
5168 init_data_structures_once();
5169
5170 raw_local_irq_save(flags);
5171 lockdep_lock();
5172 pf = get_pending_free();
5173 __lockdep_free_key_range(pf, start, size);
5174 need_callback = prepare_call_rcu_zapped(pf);
5175 lockdep_unlock();
5176 raw_local_irq_restore(flags);
5177 if (need_callback)
5178 call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
5179 /*
5180 * Wait for any possible iterators from look_up_lock_class() to pass
5181 * before continuing to free the memory they refer to.
5182 */
5183 synchronize_rcu();
5184}
5185
5186/*
5187 * Free all lockdep keys in the range [start, start+size). Does not sleep.
5188 * Ignores debug_locks. Must only be used by the lockdep selftests.
5189 */
5190static void lockdep_free_key_range_imm(void *start, unsigned long size)
5191{
5192 struct pending_free *pf = delayed_free.pf;
5193 unsigned long flags;
5194
5195 init_data_structures_once();
5196
5197 raw_local_irq_save(flags);
5198 lockdep_lock();
5199 __lockdep_free_key_range(pf, start, size);
5200 __free_zapped_classes(pf);
5201 lockdep_unlock();
5202 raw_local_irq_restore(flags);
5203}
5204
5205void lockdep_free_key_range(void *start, unsigned long size)
5206{
5207 init_data_structures_once();
5208
5209 if (inside_selftest())
5210 lockdep_free_key_range_imm(start, size);
5211 else
5212 lockdep_free_key_range_reg(start, size);
5213}
5214
5215/*
5216 * Check whether any element of the @lock->class_cache[] array refers to a
5217 * registered lock class. The caller must hold either the graph lock or the
5218 * RCU read lock.
5219 */
5220static bool lock_class_cache_is_registered(struct lockdep_map *lock)
5221{
5222 struct lock_class *class;
5223 struct hlist_head *head;
5224 int i, j;
5225
5226 for (i = 0; i < CLASSHASH_SIZE; i++) {
5227 head = classhash_table + i;
5228 hlist_for_each_entry_rcu(class, head, hash_entry) {
5229 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
5230 if (lock->class_cache[j] == class)
5231 return true;
5232 }
5233 }
5234 return false;
5235}
5236
5237/* The caller must hold the graph lock. Does not sleep. */
5238static void __lockdep_reset_lock(struct pending_free *pf,
5239 struct lockdep_map *lock)
5240{
5241 struct lock_class *class;
5242 int j;
5243
5244 /*
5245 * Remove all classes this lock might have:
5246 */
5247 for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
5248 /*
5249 * If the class exists we look it up and zap it:
5250 */
5251 class = look_up_lock_class(lock, j);
5252 if (class)
5253 zap_class(pf, class);
5254 }
5255 /*
5256 * Debug check: in the end all mapped classes should
5257 * be gone.
5258 */
5259 if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
5260 debug_locks_off();
5261}
5262
5263/*
5264 * Remove all information lockdep has about a lock if debug_locks == 1. Free
5265 * released data structures from RCU context.
5266 */
5267static void lockdep_reset_lock_reg(struct lockdep_map *lock)
5268{
5269 struct pending_free *pf;
5270 unsigned long flags;
5271 int locked;
5272 bool need_callback = false;
5273
5274 raw_local_irq_save(flags);
5275 locked = graph_lock();
5276 if (!locked)
5277 goto out_irq;
5278
5279 pf = get_pending_free();
5280 __lockdep_reset_lock(pf, lock);
5281 need_callback = prepare_call_rcu_zapped(pf);
5282
5283 graph_unlock();
5284out_irq:
5285 raw_local_irq_restore(flags);
5286 if (need_callback)
5287 call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
5288}
5289
5290/*
5291 * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
5292 * lockdep selftests.
5293 */
5294static void lockdep_reset_lock_imm(struct lockdep_map *lock)
5295{
5296 struct pending_free *pf = delayed_free.pf;
5297 unsigned long flags;
5298
5299 raw_local_irq_save(flags);
5300 lockdep_lock();
5301 __lockdep_reset_lock(pf, lock);
5302 __free_zapped_classes(pf);
5303 lockdep_unlock();
5304 raw_local_irq_restore(flags);
5305}
5306
5307void lockdep_reset_lock(struct lockdep_map *lock)
5308{
5309 init_data_structures_once();
5310
5311 if (inside_selftest())
5312 lockdep_reset_lock_imm(lock);
5313 else
5314 lockdep_reset_lock_reg(lock);
5315}
5316
5317/*
5318 * Unregister a dynamically allocated key.
5319 *
5320 * Unlike lockdep_register_key(), a search is always done to find a matching
5321 * key irrespective of debug_locks to avoid potential invalid access to freed
5322 * memory in a lock_class entry.
5323 */
5324void lockdep_unregister_key(struct lock_class_key *key)
5325{
5326 struct hlist_head *hash_head = keyhashentry(key);
5327 struct lock_class_key *k;
5328 struct pending_free *pf;
5329 unsigned long flags;
5330 bool found = false;
5331 bool need_callback = false;
5332
5333 might_sleep();
5334
5335 if (WARN_ON_ONCE(static_obj(key)))
5336 return;
5337
5338 raw_local_irq_save(flags);
5339 lockdep_lock();
5340
5341 hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
5342 if (k == key) {
5343 hlist_del_rcu(&k->hash_entry);
5344 found = true;
5345 break;
5346 }
5347 }
5348 WARN_ON_ONCE(!found && debug_locks);
5349 if (found) {
5350 pf = get_pending_free();
5351 __lockdep_free_key_range(pf, key, 1);
5352 need_callback = prepare_call_rcu_zapped(pf);
5353 }
5354 lockdep_unlock();
5355 raw_local_irq_restore(flags);
5356
5357 if (need_callback)
5358 call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
5359
5360 /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
5361 synchronize_rcu();
5362}
5363EXPORT_SYMBOL_GPL(lockdep_unregister_key);
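
/*
 * Illustrative sketch (not part of this file): the expected lifecycle of a
 * dynamically allocated key, here embedded in the object it protects
 * (names are hypothetical):
 *
 *	struct my_obj {
 *		struct lock_class_key	key;
 *		struct lockdep_map	dep_map;
 *	};
 *
 *	-- at creation time:
 *	lockdep_register_key(&obj->key);
 *	lockdep_init_map(&obj->dep_map, "my_obj", &obj->key, 0);
 *
 *	-- at teardown, once the lock can no longer be taken:
 *	lockdep_unregister_key(&obj->key);
 *
 * Unregistering zaps the classes hanging off the key and waits for RCU,
 * hence the might_sleep() above.
 */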
5364
5365void __init lockdep_init(void)
5366{
5367 printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
5368
5369 printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
5370 printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
5371 printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
5372 printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
5373 printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
5374 printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
5375 printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
5376
5377 printk(" memory used by lock dependency info: %zu kB\n",
5378 (sizeof(lock_classes) +
5379 sizeof(lock_classes_in_use) +
5380 sizeof(classhash_table) +
5381 sizeof(list_entries) +
5382 sizeof(list_entries_in_use) +
5383 sizeof(chainhash_table) +
5384 sizeof(delayed_free)
5385#ifdef CONFIG_PROVE_LOCKING
5386 + sizeof(lock_cq)
5387 + sizeof(lock_chains)
5388 + sizeof(lock_chains_in_use)
5389 + sizeof(chain_hlocks)
5390#endif
5391 ) / 1024
5392 );
5393
5394#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
5395 printk(" memory used for stack traces: %zu kB\n",
5396 (sizeof(stack_trace) + sizeof(stack_trace_hash)) / 1024
5397 );
5398#endif
5399
5400 printk(" per task-struct memory footprint: %zu bytes\n",
5401 sizeof(((struct task_struct *)NULL)->held_locks));
5402}
5403
5404static void
5405print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
5406 const void *mem_to, struct held_lock *hlock)
5407{
5408 if (!debug_locks_off())
5409 return;
5410 if (debug_locks_silent)
5411 return;
5412
5413 pr_warn("\n");
5414 pr_warn("=========================\n");
5415 pr_warn("WARNING: held lock freed!\n");
5416 print_kernel_ident();
5417 pr_warn("-------------------------\n");
5418 pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
5419 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
5420 print_lock(hlock);
5421 lockdep_print_held_locks(curr);
5422
5423 pr_warn("\nstack backtrace:\n");
5424 dump_stack();
5425}
5426
5427static inline int not_in_range(const void* mem_from, unsigned long mem_len,
5428 const void* lock_from, unsigned long lock_len)
5429{
5430 return lock_from + lock_len <= mem_from ||
5431 mem_from + mem_len <= lock_from;
5432}
5433
5434/*
5435 * Called when kernel memory is freed (or unmapped), or if a lock
5436 * is destroyed or reinitialized - this code checks whether there is
5437 * any held lock in the memory range of <from> to <to>:
5438 */
5439void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
5440{
5441 struct task_struct *curr = current;
5442 struct held_lock *hlock;
5443 unsigned long flags;
5444 int i;
5445
5446 if (unlikely(!debug_locks))
5447 return;
5448
5449 raw_local_irq_save(flags);
5450 for (i = 0; i < curr->lockdep_depth; i++) {
5451 hlock = curr->held_locks + i;
5452
5453 if (not_in_range(mem_from, mem_len, hlock->instance,
5454 sizeof(*hlock->instance)))
5455 continue;
5456
5457 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
5458 break;
5459 }
5460 raw_local_irq_restore(flags);
5461}
5462EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
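
/*
 * Illustrative sketch (not part of this file): the allocators call this
 * before handing memory back, and code that recycles an object containing
 * a lock can do the same ('obj' is hypothetical):
 *
 *	debug_check_no_locks_freed(obj, sizeof(*obj));
 *	kfree(obj);
 */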
5463
5464static void print_held_locks_bug(void)
5465{
5466 if (!debug_locks_off())
5467 return;
5468 if (debug_locks_silent)
5469 return;
5470
5471 pr_warn("\n");
5472 pr_warn("====================================\n");
5473 pr_warn("WARNING: %s/%d still has locks held!\n",
5474 current->comm, task_pid_nr(current));
5475 print_kernel_ident();
5476 pr_warn("------------------------------------\n");
5477 lockdep_print_held_locks(current);
5478 pr_warn("\nstack backtrace:\n");
5479 dump_stack();
5480}
5481
5482void debug_check_no_locks_held(void)
5483{
5484 if (unlikely(current->lockdep_depth > 0))
5485 print_held_locks_bug();
5486}
5487EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
5488
5489#ifdef __KERNEL__
5490void debug_show_all_locks(void)
5491{
5492 struct task_struct *g, *p;
5493
5494 if (unlikely(!debug_locks)) {
5495 pr_warn("INFO: lockdep is turned off.\n");
5496 return;
5497 }
5498 pr_warn("\nShowing all locks held in the system:\n");
5499
5500 rcu_read_lock();
5501 for_each_process_thread(g, p) {
5502 if (!p->lockdep_depth)
5503 continue;
5504 lockdep_print_held_locks(p);
5505 touch_nmi_watchdog();
5506 touch_all_softlockup_watchdogs();
5507 }
5508 rcu_read_unlock();
5509
5510 pr_warn("\n");
5511 pr_warn("=============================================\n\n");
5512}
5513EXPORT_SYMBOL_GPL(debug_show_all_locks);
5514#endif
5515
5516/*
5517 * Careful: only use this function if you are sure that
5518 * the task cannot run in parallel!
5519 */
5520void debug_show_held_locks(struct task_struct *task)
5521{
5522 if (unlikely(!debug_locks)) {
5523 printk("INFO: lockdep is turned off.\n");
5524 return;
5525 }
5526 lockdep_print_held_locks(task);
5527}
5528EXPORT_SYMBOL_GPL(debug_show_held_locks);
5529
5530asmlinkage __visible void lockdep_sys_exit(void)
5531{
5532 struct task_struct *curr = current;
5533
5534 if (unlikely(curr->lockdep_depth)) {
5535 if (!debug_locks_off())
5536 return;
5537 pr_warn("\n");
5538 pr_warn("================================================\n");
5539 pr_warn("WARNING: lock held when returning to user space!\n");
5540 print_kernel_ident();
5541 pr_warn("------------------------------------------------\n");
5542 pr_warn("%s/%d is leaving the kernel with locks still held!\n",
5543 curr->comm, curr->pid);
5544 lockdep_print_held_locks(curr);
5545 }
5546
5547 /*
5548 * The lock history for each syscall should be independent. So wipe the
5549 * slate clean on return to userspace.
5550 */
5551 lockdep_invariant_state(false);
5552}
5553
5554void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
5555{
5556 struct task_struct *curr = current;
5557
5558 /* Note: the following can be executed concurrently, so be careful. */
5559 pr_warn("\n");
5560 pr_warn("=============================\n");
5561 pr_warn("WARNING: suspicious RCU usage\n");
5562 print_kernel_ident();
5563 pr_warn("-----------------------------\n");
5564 pr_warn("%s:%d %s!\n", file, line, s);
5565 pr_warn("\nother info that might help us debug this:\n\n");
5566 pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
5567 !rcu_lockdep_current_cpu_online()
5568 ? "RCU used illegally from offline CPU!\n"
5569 : !rcu_is_watching()
5570 ? "RCU used illegally from idle CPU!\n"
5571 : "",
5572 rcu_scheduler_active, debug_locks);
5573
5574 /*
5575	 * If a CPU is in the RCU-free window in idle (i.e. in the section
5576	 * between rcu_idle_enter() and rcu_idle_exit()), then RCU
5577 * considers that CPU to be in an "extended quiescent state",
5578 * which means that RCU will be completely ignoring that CPU.
5579 * Therefore, rcu_read_lock() and friends have absolutely no
5580 * effect on a CPU running in that state. In other words, even if
5581 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
5582 * delete data structures out from under it. RCU really has no
5583 * choice here: we need to keep an RCU-free window in idle where
5584	 * the CPU may possibly enter into low power mode. This way CPUs that
5585	 * started a grace period can notice the extended quiescent state.
5586	 * Otherwise we would delay any grace period for as long as we run
5587 * in the idle task.
5588 *
5589 * So complain bitterly if someone does call rcu_read_lock(),
5590 * rcu_read_lock_bh() and so on from extended quiescent states.
5591 */
5592 if (!rcu_is_watching())
5593 pr_warn("RCU used illegally from extended quiescent state!\n");
5594
5595 lockdep_print_held_locks(curr);
5596 pr_warn("\nstack backtrace:\n");
5597 dump_stack();
5598}
5599EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);