/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */

#ifndef __LINUX_LOCKDEP_WRAPPER_H
#define __LINUX_LOCKDEP_WRAPPER_H

#include_next <linux/lockdep.h>

#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)

struct task_struct;
struct lockdep_map;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit
{
        LOCK_USED = 0,
        LOCK_USED_IN_HARDIRQ,
        LOCK_USED_IN_SOFTIRQ,
        LOCK_ENABLED_SOFTIRQS,
        LOCK_ENABLED_HARDIRQS,
        LOCK_USED_IN_HARDIRQ_READ,
        LOCK_USED_IN_SOFTIRQ_READ,
        LOCK_ENABLED_SOFTIRQS_READ,
        LOCK_ENABLED_HARDIRQS_READ,
        LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define LOCKF_USED (1 << LOCK_USED)
#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ)
#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ)
#define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS)
#define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS)

#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ)
#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ)
#define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ)
#define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ)

#define LOCKF_ENABLED_IRQS_READ \
                (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
#define LOCKF_USED_IN_IRQ_READ \
                (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

#define MAX_LOCKDEP_SUBCLASSES 8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
        char __one_byte; /* occupy one byte so each subkey gets a distinct address */
} __attribute__ ((__packed__));

struct lock_class_key {
        struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
};

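/*
 * For example (an illustrative declaration, not part of this header):
 * a subsystem can split otherwise-identical locks into separate
 * classes by giving each its own static key, which lives in .data and
 * therefore has a unique address:
 *
 *	static struct lock_class_key hypothetical_outer_key;
 *	static struct lock_class_key hypothetical_inner_key;
 */
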
/*
 * The lock-class itself:
 */
struct lock_class {
        /*
         * class-hash:
         */
        struct list_head hash_entry;

        /*
         * global list of all lock-classes:
         */
        struct list_head lock_entry;

        struct lockdep_subclass_key *key;
        unsigned int subclass;

        /*
         * IRQ/softirq usage tracking bits:
         */
        unsigned long usage_mask;
        struct stack_trace usage_traces[LOCK_USAGE_STATES];

        /*
         * These fields represent a directed graph of lock dependencies;
         * to every node we attach a list of "forward" and a list of
         * "backward" graph nodes.
         */
        struct list_head locks_after, locks_before;

        /*
         * Generation counter, used during certain classes of graph
         * walking to ensure that we check each node only once:
         */
        unsigned int version;

        /*
         * Statistics counter:
         */
        unsigned long ops;

        const char *name;
        int name_version;

#ifdef CONFIG_LOCK_STAT
        unsigned long contention_point[4];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
        s64 min;
        s64 max;
        s64 total;
        unsigned long nr;
};

enum bounce_type {
        bounce_acquired_write,
        bounce_acquired_read,
        bounce_contended_write,
        bounce_contended_read,
        nr_bounce_types,

        bounce_acquired = bounce_acquired_write,
        bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
        unsigned long contention_point[4];
        struct lock_time read_waittime;
        struct lock_time write_waittime;
        struct lock_time read_holdtime;
        struct lock_time write_holdtime;
        unsigned long bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
        struct lock_class_key *key;
        struct lock_class *class_cache;
        const char *name;
#ifdef CONFIG_LOCK_STAT
        int cpu;
#endif
};

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
        struct list_head entry;
        struct lock_class *class;
        struct stack_trace trace;
        int distance;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
        struct list_head entry;
        u64 chain_key;
};

struct held_lock {
        /*
         * One-way hash of the dependency chain up to this point. We
         * hash the hashes step by step as the dependency chain grows.
         *
         * We use it for dependency caching and skip detection passes
         * and dependency updates on a cache hit, so it is absolutely
         * critical for 100% coverage of the validator that every
         * unique dependency path that can occur in the system gets a
         * unique key value - making hash collisions as unlikely as
         * possible, hence the 64-bit width.
         *
         * The task struct holds the current hash value (initialized
         * with zero), here we store the previous hash value:
         */
        u64 prev_chain_key;
        struct lock_class *class;
        unsigned long acquire_ip;
        struct lockdep_map *instance;

#ifdef CONFIG_LOCK_STAT
        u64 waittime_stamp;
        u64 holdtime_stamp;
#endif
        /*
         * The lock-stack is unified in that the lock chains of interrupt
         * contexts nest on top of process-context chains, but we 'separate'
         * the hashes by starting with 0 if we cross into an interrupt
         * context, and we also do not add cross-context lock
         * dependencies - the lock usage graph walking covers that area
         * anyway, and we'd just unnecessarily increase the number of
         * dependencies otherwise. [Note: hardirq and softirq contexts
         * are separated from each other too.]
         *
         * The following field is used to detect when we cross into an
         * interrupt context:
         */
        int irq_context;
        int trylock;
        int read;
        int check;
        int hardirqs_off;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */
extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
                             struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where special locking or special
 * initialization of a lock would otherwise make the validator get the
 * scope of dependencies wrong: either too broad (the class needs a
 * split) or too narrow (the class suffers from a false split):
 */
#define lockdep_set_class(lock, key) \
                lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
                lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
                lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
                lockdep_init_map(&(lock)->dep_map, #lock, \
                                 (lock)->dep_map.key, sub)

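/*
 * Usage sketch (hypothetical driver code, for illustration only):
 * two spinlocks of the same class that legitimately nest can be split
 * into separate classes so that the validator does not report a false
 * self-deadlock:
 *
 *	static struct lock_class_key hypothetical_inner_key;
 *
 *	spin_lock_init(&outer->lock);
 *	spin_lock_init(&inner->lock);
 *	lockdep_set_class(&inner->lock, &hypothetical_inner_key);
 */
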
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for "check":
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                         int trylock, int read, int check, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
                         unsigned long ip);

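/*
 * For example, with CONFIG_PROVE_LOCKING enabled, a recursive
 * read-acquire of an rwlock (see the rwlock_acquire_read() mapping
 * near the end of this file) expands to:
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 2, 2, _RET_IP_);
 *
 * i.e. subclass 0, not a trylock, read=2 (same-instance read
 * recursion allowed), check=2 (full validation).
 */
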
# define INIT_LOCKDEP .lockdep_recursion = 0,

#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, i) do { } while (0)
# define lock_release(l, n, i) do { } while (0)
# define lockdep_init() do { } while (0)
# define lockdep_info() do { } while (0)
# define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0)
# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
                do { (void)(key); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
                do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub) do { } while (0)

# define INIT_LOCKDEP
# define lockdep_reset() do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size) do { } while (0)

/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk) (0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock);

#define LOCK_CONTENDED(_lock, try, lock) \
do { \
        if (!try(_lock)) { \
                lock_contended(&(_lock)->dep_map, _RET_IP_); \
                lock(_lock); \
        } \
        lock_acquired(&(_lock)->dep_map); \
} while (0)

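/*
 * Illustrative sketch (hypothetical names, not an actual kernel call
 * site): a primitive with a trylock fast path and a blocking slow
 * path, whose structure embeds a dep_map, would be instrumented as
 *
 *	LOCK_CONTENDED(lk, my_trylock, my_lock_slowpath);
 *
 * If my_trylock(lk) fails, the contention is recorded via
 * lock_contended() before blocking in my_lock_slowpath(lk), and
 * lock_acquired() then timestamps the eventual acquisition.
 */
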
#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
        lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING 1

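/*
 * For example (illustrative; 'parent' and 'child' are hypothetical):
 * when two locks of the same class are always taken in parent->child
 * order, the inner acquisition can be annotated with the mutex API's
 * subclass-aware acquire so the validator does not flag it:
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */
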
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
# else
#  define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
# endif
# define spin_release(l, n, i) lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i) do { } while (0)
# define spin_release(l, n, i) do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
#  define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, i)
# else
#  define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
#  define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, i)
# endif
# define rwlock_release(l, n, i) lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i) do { } while (0)
# define rwlock_acquire_read(l, s, t, i) do { } while (0)
# define rwlock_release(l, n, i) do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
# else
#  define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
# endif
# define mutex_release(l, n, i) lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i) do { } while (0)
# define mutex_release(l, n, i) do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, i)
#  define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, i)
# else
#  define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, i)
#  define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, i)
# endif
# define rwsem_release(l, n, i) lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i) do { } while (0)
# define rwsem_acquire_read(l, s, t, i) do { } while (0)
# define rwsem_release(l, n, i) do { } while (0)
#endif

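/*
 * Illustrative sketch (simplified, not the actual mutex code) of how
 * a lock implementation threads these hooks around the real
 * operation: the lock side calls the acquire hook before taking the
 * underlying lock, and the unlock side calls the release hook before
 * dropping it:
 *
 *	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
 *	mutex_release(&lock->dep_map, nested, _RET_IP_);
 */
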
#endif /* linux kernel < 2.6.18 */

#endif /* __LINUX_LOCKDEP_WRAPPER_H */