/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)			\
	__LOCKF(USED_IN_##__STATE)		\
	__LOCKF(USED_IN_##__STATE##_READ)	\
	__LOCKF(ENABLED_##__STATE)		\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};

#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by adding
 * every to-be-taken lock to each currently held lock's own dependency
 * table (if it's not there yet), and we check them for lock order
 * conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL

#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	262144UL

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;
extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif
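
/*
 * Illustrative sketch, not part of the original header: a consumer such as
 * the /proc/lockdep code can walk all_lock_classes and report per-class
 * dependency counts roughly along these lines (the seq_file handle 'm' and
 * the format string are hypothetical):
 *
 *	struct lock_class *class;
 *
 *	list_for_each_entry(class, &all_lock_classes, lock_entry)
 *		seq_printf(m, "%s: FD:%lu BD:%lu\n", class->name,
 *			   lockdep_count_forward_deps(class),
 *			   lockdep_count_backward_deps(class));
 */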

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per cpu as they are often accessed in fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
#endif
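
/*
 * Illustrative usage sketch, not part of the original header (real call
 * sites live in lockdep.c and lockdep_proc.c; the exact statements below
 * are approximations):
 *
 *	- bump a counter from the validator fast path, with IRQs disabled:
 *		debug_atomic_inc(chain_lookup_hits);
 *
 *	- fold the per-cpu counters into one total when reporting:
 *		seq_printf(m, "chain lookup hits: %llu\n",
 *			   debug_atomic_read(chain_lookup_hits));
 *
 * With CONFIG_DEBUG_LOCKDEP disabled, the macros expand to no-ops (or 0 for
 * debug_atomic_read), so call sites need no #ifdefs of their own.
 */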