Commit d801dbbb authored by weiqingv

lite-lockdep: add basic lock acquisition records

ECNU inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5R8DS

--------------------------------

Introduce a new tool for lightweight lock tracing. In this commit the basic data structures and hook
points mirror those of Lockdep. Lock instances are mapped to lite lock classes, and the initialization,
acquisition and release paths are hooked to collect lock information. The locks held by each task_struct
are recorded dynamically, and the recorded lock state can be dumped in abnormal situations such as
hung tasks. Unlike Lockdep, locks are only recorded; there is no context coupling or circular-dependency
checking, which keeps the overhead low. For now, mutexes, spinlocks, and rwsems are supported.
Signed-off-by: weiqingv <709088312@qq.com>
Parent f996d7f7
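Because the hooks live inside the common lock slow paths, ordinary kernel code is recorded without any annotation. Below is a minimal, illustrative sketch (not part of this commit) of a module whose mutex would show up in the lite-lockdep per-task records, assuming CONFIG_LITE_LOCKDEP=y and the lite_lockdep sysctl left at its default of 1; the names demo_lock, demo_init and demo_exit are made up for the example.

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/delay.h>

static DEFINE_MUTEX(demo_lock);

static int __init demo_init(void)
{
	/*
	 * mutex_lock() reaches __mutex_lock_common(), where this patch calls
	 * lite_mutex_acquire_nest(); the acquisition is appended to
	 * current->held_locks and would be printed by
	 * lite_debug_show_all_locks() if this task hung while holding it.
	 */
	mutex_lock(&demo_lock);
	ssleep(1);			/* pretend to work under the lock */
	mutex_unlock(&demo_lock);	/* entry is removed from the record */
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");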
......@@ -991,6 +991,27 @@ void lockdep_annotate_inode_mutex_key(struct inode *inode)
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif
#ifdef CONFIG_LITE_LOCKDEP
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
if (S_ISDIR(inode->i_mode)) {
struct file_system_type *type = inode->i_sb->s_type;
/* Set new key only if filesystem hasn't already changed it */
if (lite_lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
/*
* ensure nobody is actually holding i_mutex
*/
// mutex_destroy(&inode->i_mutex);
init_rwsem(&inode->i_rwsem);
lite_lockdep_set_class(&inode->i_rwsem,
&type->i_mutex_dir_key);
}
}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif
/**
* unlock_new_inode - clear the I_NEW state and wake up any waiters
* @inode: new inode to unlock
......
......@@ -211,6 +211,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
s->s_user_ns = get_user_ns(user_ns);
init_rwsem(&s->s_umount);
lockdep_set_class(&s->s_umount, &type->s_umount_key);
lite_lockdep_set_class(&s->s_umount, &type->s_umount_key);
/*
* sget() can have s_umount recursion.
*
......@@ -254,6 +255,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
atomic_set(&s->s_active, 1);
mutex_init(&s->s_vfs_rename_mutex);
lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
lite_lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
init_rwsem(&s->s_dquot.dqio_sem);
s->s_maxbytes = MAX_NON_LFS;
s->s_op = &default_op;
......
......@@ -65,7 +65,7 @@ static inline void complete_release(struct completion *x) {}
* This macro declares and initializes a completion structure on the kernel
* stack.
*/
#ifdef CONFIG_LOCKDEP
#if defined(CONFIG_LOCKDEP) || defined(CONFIG_LITE_LOCKDEP)
# define DECLARE_COMPLETION_ONSTACK(work) \
struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \
......
......@@ -2980,7 +2980,7 @@ extern struct inode *find_inode_rcu(struct super_block *, unsigned long,
extern struct inode *find_inode_by_ino_rcu(struct super_block *, unsigned long);
extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
extern int insert_inode_locked(struct inode *);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP)
extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
#else
static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
......
#ifndef __LINUX_LITE_LOCKDEP_H
#define __LINUX_LITE_LOCKDEP_H
#include <linux/lite_lockdep_types.h>
struct task_struct;
/* sysctl */
extern int lite_lockdep;
#ifdef CONFIG_LITE_LOCKDEP
#include <linux/lite_lockdep_types.h>
extern void lite_lock_acquire(struct lite_lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
struct lite_lockdep_map *nest_lock, unsigned long ip);
extern void lite_lock_release(struct lite_lockdep_map *lock, unsigned long ip);
#define lite_lock_acquire_exclusive(l, s, t, n, i) lite_lock_acquire(l, s, t, 0, 1, n, i)
#define lite_lock_acquire_shared(l, s, t, n, i) lite_lock_acquire(l, s, t, 1, 1, n, i)
#define lite_lock_acquire_shared_recursive(l, s, t, n, i) lite_lock_acquire(l, s, t, 2, 1, n, i)
#define lite_spin_acquire(l, s, t, i) lite_lock_acquire_exclusive(l, s, t, NULL, i)
#define lite_spin_acquire_nest(l, s, t, n, i) lite_lock_acquire_exclusive(l, s, t, n, i)
#define lite_spin_release(l, i) lite_lock_release(l, i)
#define lite_mutex_acquire(l, s, t, i) lite_lock_acquire_exclusive(l, s, t, NULL, i)
#define lite_mutex_acquire_nest(l, s, t, n, i) lite_lock_acquire_exclusive(l, s, t, n, i)
#define lite_mutex_release(l, i) lite_lock_release(l, i)
#define lite_rwsem_acquire(l, s, t, i) lite_lock_acquire_exclusive(l, s, t, NULL, i)
#define lite_rwsem_acquire_nest(l, s, t, n, i) lite_lock_acquire_exclusive(l, s, t, n, i)
#define lite_rwsem_acquire_read(l, s, t, i) lite_lock_acquire_shared(l, s, t, NULL, i)
#define lite_rwsem_release(l, i) lite_lock_release(l, i)
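/*
 * One per-task record of a held lock, kept in task_struct::held_locks[]
 * and filled in on acquisition.
 */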
struct lite_held_lock {
unsigned long acquire_ip;
struct lite_lockdep_map *instance;
struct lite_lockdep_map *nest_lock;
unsigned int subclass;
unsigned int class_idx:MAX_LITE_LOCKDEP_KEYS_BITS;
unsigned int trylock:1;
unsigned int read:2;
unsigned int check:1;
};
extern void lite_lockdep_print_held_locks(struct task_struct *p);
extern void lite_debug_show_all_locks(void);
extern void lite_lockdep_init_map_type(struct lite_lockdep_map *lock, const char *name,
struct lite_lock_class_key *key, int subclass);
#define lite_lockdep_match_class(lock, key) \
lite_lockdep_match_key(&(lock)->lite_dep_map, key)
static inline int lite_lockdep_match_key(struct lite_lockdep_map *lock,
struct lite_lock_class_key *key)
{
return lock->key == key;
}
static inline void
lite_lockdep_init_map(struct lite_lockdep_map *lock, const char *name,
struct lite_lock_class_key *key, int subclass)
{
lite_lockdep_init_map_type(lock, name, key, subclass);
}
#define lite_lockdep_set_class(lock, key) \
lite_lockdep_init_map(&(lock)->lite_dep_map, #key, key, 0)
#define lite_lockdep_set_class_and_name(lock, key, name) \
lite_lockdep_init_map(&(lock)->lite_dep_map, name, key, 0)
#else /* !CONFIG_LITE_LOCKDEP */
# define lite_lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lite_lock_release(l, i) do { } while (0)
# define lite_lockdep_set_class(l, m) do { } while (0)
#endif /* CONFIG_LITE_LOCKDEP */
#endif /* __LINUX_LITE_LOCKDEP_H */
\ No newline at end of file
#ifndef __LINUX_LITE_LOCKDEP_TYPES_H
#define __LINUX_LITE_LOCKDEP_TYPES_H
#include <linux/types.h>
#ifdef CONFIG_LITE_LOCKDEP
#define MAX_LITE_LOCKDEP_KEYS_BITS 13
#define MAX_LITE_LOCKDEP_KEYS (1UL << MAX_LITE_LOCKDEP_KEYS_BITS)
#define MAX_LITE_LOCKDEP_CHAINS_BITS 16
struct lite_lock_class_sub_key {
char __one_byte;
} __attribute__ ((__packed__));
/* hash_entry is used to keep track of dynamically allocated keys. */
struct lite_lock_class_key {
union {
struct hlist_node hash_entry;
struct lite_lock_class_sub_key sub_key[1];
};
};
struct lite_lock_class {
/*
* class-hash:
*/
struct hlist_node hash_entry;
struct list_head lock_entry;
const struct lite_lock_class_sub_key *key;
const char *name;
} __no_randomize_layout;
/*
* Map the lock object (the lock instance) to the lock-class object.
* This is embedded into specific lock instances:
*/
struct lite_lockdep_map {
struct lite_lock_class_key *key;
struct lite_lock_class *class;
const char *name;
};
#else /* !CONFIG_LITE_LOCKDEP */
struct lite_lock_class_key { };
struct lite_lockdep_map { };
#endif /* CONFIG_LITE_LOCKDEP */
#endif /* __LINUX_LITE_LOCKDEP_TYPES_H */
......@@ -189,6 +189,15 @@ struct lockdep_map {
struct pin_cookie { unsigned int val; };
#elif defined CONFIG_LITE_LOCKDEP
#include <linux/lite_lockdep_types.h>
#define lock_class_key lite_lock_class_key
#define lockdep_map lite_lockdep_map
struct pin_cookie { };
#else /* !CONFIG_LOCKDEP */
/*
......
......@@ -14,6 +14,7 @@
#include <asm/current.h>
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/lite_lockdep.h>
#include <linux/lockdep.h>
#include <linux/atomic.h>
#include <asm/processor.h>
......@@ -63,6 +64,9 @@ struct mutex {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
#ifdef CONFIG_LITE_LOCKDEP
struct lite_lockdep_map lite_dep_map;
#endif
};
struct ww_class;
......@@ -125,6 +129,11 @@ do { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_SLEEP, \
}
#elif defined(CONFIG_LITE_LOCKDEP)
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
, .lite_dep_map = { \
.name = #lockname, \
}
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif
......@@ -154,7 +163,7 @@ extern bool mutex_is_locked(struct mutex *lock);
* See kernel/locking/mutex.c for detailed documentation of these APIs.
* Also see Documentation/locking/mutex-design.rst.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP)
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
......@@ -169,12 +178,19 @@ extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)
#ifdef CONFIG_LITE_LOCKDEP
#define mutex_lock_nest_lock(lock, nest_lock) \
do { \
	typecheck(struct lite_lockdep_map *, &(nest_lock)->lite_dep_map); \
	_mutex_lock_nest_lock(lock, &(nest_lock)->lite_dep_map); \
} while (0)
#else
#define mutex_lock_nest_lock(lock, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
#endif
#else
extern void mutex_lock(struct mutex *lock);
extern int __must_check mutex_lock_interruptible(struct mutex *lock);
......
......@@ -51,6 +51,9 @@ struct rw_semaphore {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
#ifdef CONFIG_LITE_LOCKDEP
struct lite_lockdep_map lite_dep_map;
#endif
};
/* In all implementations count != 0 means locked */
......@@ -70,6 +73,11 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
.name = #lockname, \
.wait_type_inner = LD_WAIT_SLEEP, \
},
#elif defined(CONFIG_LITE_LOCKDEP)
# define __RWSEM_DEP_MAP_INIT(lockname) \
.lite_dep_map = { \
.name = #lockname, \
},
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
......@@ -157,7 +165,7 @@ extern void up_write(struct rw_semaphore *sem);
*/
extern void downgrade_write(struct rw_semaphore *sem);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP)
/*
* nested locking. NOTE: rwsems are not allowed to recurse
* (which occurs if the same task tries to acquire the same
......@@ -177,12 +185,22 @@ extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
#ifdef CONFIG_LITE_LOCKDEP
# define down_write_nest_lock(sem, nest_lock) \
do { \
typecheck(struct lite_lockdep_map *, &(nest_lock)->lite_dep_map); \
_down_write_nest_lock(sem, &(nest_lock)->lite_dep_map); \
} while (0);
#else
# define down_write_nest_lock(sem, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
_down_write_nest_lock(sem, &(nest_lock)->dep_map); \
} while (0);
#endif
/*
* Take/release a lock when not the owner will release it.
*
......
......@@ -1068,6 +1068,12 @@ struct task_struct {
struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif
#ifdef CONFIG_LITE_LOCKDEP
# define MAX_LITE_LOCK_DEPTH 48UL
int lite_lockdep_depth;
struct lite_held_lock held_locks[MAX_LITE_LOCK_DEPTH];
#endif
#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
unsigned int in_ubsan;
#endif
......
......@@ -32,7 +32,11 @@ static inline void sema_init(struct semaphore *sem, int val)
{
static struct lock_class_key __key;
*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
#ifdef CONFIG_LITE_LOCKDEP
lite_lockdep_init_map(&sem->lock.lite_dep_map, "semaphore->lock", &__key, 0);
#else
lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);
#endif
}
extern void down(struct semaphore *sem);
......
......@@ -56,6 +56,7 @@
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lite_lockdep.h>
#include <linux/lockdep.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>
......@@ -224,7 +225,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
#define raw_spin_lock(lock) _raw_spin_lock(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP)
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock_nested(lock, subclass)
......@@ -252,7 +253,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP)
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
......
......@@ -87,7 +87,11 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
preempt_disable();
if (do_raw_spin_trylock(lock)) {
#ifdef CONFIG_LITE_LOCKDEP
lite_spin_acquire(&lock->lite_dep_map, 0, 1, _RET_IP_);
#else
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
#endif
return 1;
}
preempt_enable();
......@@ -99,7 +103,8 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
* even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
* not re-enabled during lock-acquire (which the preempt-spin-ops do):
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) || \
defined(CONFIG_LITE_LOCKDEP)
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
......@@ -107,7 +112,11 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
local_irq_save(flags);
preempt_disable();
#ifdef CONFIG_LITE_LOCKDEP
lite_spin_acquire(&lock->lite_dep_map, 0, 0, _RET_IP_);
#else
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
#endif
/*
* On lockdep we dont want the hand-coded irq-enable of
* do_raw_spin_lock_flags() code, because lockdep assumes
......@@ -125,29 +134,48 @@ static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
local_irq_disable();
preempt_disable();
#ifdef CONFIG_LITE_LOCKDEP
lite_spin_acquire(&lock->lite_dep_map, 0, 0, _RET_IP_);
do_raw_spin_lock(lock);
#else
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#endif
}
static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
#ifdef CONFIG_LITE_LOCKDEP
lite_spin_acquire(&lock->lite_dep_map, 0, 0, _RET_IP_);
do_raw_spin_lock(lock);
#else
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#endif
}
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
preempt_disable();
#ifdef CONFIG_LITE_LOCKDEP
lite_spin_acquire(&lock->lite_dep_map, 0, 0, _RET_IP_);
do_raw_spin_lock(lock);
#else
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#endif
}
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC || CONFIG_LITE_LOCKDEP */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
#ifdef CONFIG_LITE_LOCKDEP
lite_spin_release(&lock->lite_dep_map, _RET_IP_);
#else
spin_release(&lock->dep_map, _RET_IP_);
#endif
do_raw_spin_unlock(lock);
preempt_enable();
}
......@@ -155,7 +183,11 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
{
#ifdef CONFIG_LITE_LOCKDEP
lite_spin_release(&lock->lite_dep_map, _RET_IP_);
#else
spin_release(&lock->dep_map, _RET_IP_);
#endif
do_raw_spin_unlock(lock);
local_irq_restore(flags);
preempt_enable();
......@@ -163,7 +195,11 @@ static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
#ifdef CONFIG_LITE_LOCKDEP
lite_spin_release(&lock->lite_dep_map, _RET_IP_);
#else
spin_release(&lock->dep_map, _RET_IP_);
#endif
do_raw_spin_unlock(lock);
local_irq_enable();
preempt_enable();
......@@ -171,7 +207,11 @@ static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
#ifdef CONFIG_LITE_LOCKDEP
lite_spin_release(&lock->lite_dep_map, _RET_IP_);
#else
spin_release(&lock->dep_map, _RET_IP_);
#endif
do_raw_spin_unlock(lock);
__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
......@@ -180,7 +220,11 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
if (do_raw_spin_trylock(lock)) {
#ifdef CONFIG_LITE_LOCKDEP
lite_spin_acquire(&lock->lite_dep_map, 0, 1, _RET_IP_);
#else
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
#endif
return 1;
}
__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
......
......@@ -16,6 +16,7 @@
#endif
#include <linux/lockdep_types.h>
#include <linux/lite_lockdep_types.h>
typedef struct raw_spinlock {
arch_spinlock_t raw_lock;
......@@ -26,6 +27,9 @@ typedef struct raw_spinlock {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
#ifdef CONFIG_LITE_LOCKDEP
struct lite_lockdep_map lite_dep_map;
#endif
} raw_spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
......@@ -43,6 +47,15 @@ typedef struct raw_spinlock {
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
}
#elif defined(CONFIG_LITE_LOCKDEP)
# define RAW_SPIN_DEP_MAP_INIT(lockname) \
.lite_dep_map = { \
.name = #lockname, \
}
# define SPIN_DEP_MAP_INIT(lockname) \
.lite_dep_map = { \
.name = #lockname, \
}
#else
# define RAW_SPIN_DEP_MAP_INIT(lockname)
# define SPIN_DEP_MAP_INIT(lockname)
......@@ -79,6 +92,14 @@ typedef struct spinlock {
struct lockdep_map dep_map;
};
#endif
#ifdef CONFIG_LITE_LOCKDEP
# define LOCK_PADSIZE (offsetof(struct raw_spinlock, lite_dep_map))
struct {
u8 __padding[LOCK_PADSIZE];
struct lite_lockdep_map lite_dep_map;
};
#endif
};
} spinlock_t;
......
......@@ -75,7 +75,7 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name
__init_swait_queue_head((q), #q, &__key); \
} while (0)
#ifdef CONFIG_LOCKDEP
#if defined(CONFIG_LOCKDEP) || defined(CONFIG_LITE_LOCKDEP)
# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
({ init_swait_queue_head(&name); name; })
# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \
......
......@@ -69,7 +69,7 @@ extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *n
__init_waitqueue_head((wq_head), #wq_head, &__key); \
} while (0)
#ifdef CONFIG_LOCKDEP
#if defined(CONFIG_LOCKDEP) || defined(CONFIG_LITE_LOCKDEP)
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
......
......@@ -39,7 +39,7 @@ struct ww_acquire_ctx {
struct ww_class *ww_class;
struct ww_mutex *contending_lock;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP)
struct lockdep_map dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
......
#undef TRACE_SYSTEM
#define TRACE_SYSTEM lite_lock
#if !defined(_TRACE_LITE_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_LITE_LOCK_H
#include <linux/lite_lockdep.h>
#include <linux/tracepoint.h>
#ifdef CONFIG_LITE_LOCKDEP
TRACE_EVENT(lock_acquire_lite,
TP_PROTO(struct lite_lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
struct lite_lockdep_map *next_lock, unsigned long ip),
TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
TP_STRUCT__entry(
__field(unsigned int, flags)
__string(name, lock->name)
__field(void *, lockdep_addr)
),
TP_fast_assign(
__entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
__assign_str(name, lock->name);
__entry->lockdep_addr = lock;
),
TP_printk("======== %p %s%s%s", __entry->lockdep_addr,
(__entry->flags & 1) ? "try " : "",
(__entry->flags & 2) ? "read " : "",
__get_str(name))
);
DECLARE_EVENT_CLASS(lock,
TP_PROTO(struct lite_lockdep_map *lock, unsigned long ip),
TP_ARGS(lock, ip),
TP_STRUCT__entry(
__string( name, lock->name )
__field( void *, lockdep_addr )
),
TP_fast_assign(
__assign_str(name, lock->name);
__entry->lockdep_addr = lock;
),
TP_printk("======== %p %s", __entry->lockdep_addr, __get_str(name))
);
DEFINE_EVENT(lock, lock_release_lite,
TP_PROTO(struct lite_lockdep_map *lock, unsigned long ip),
TP_ARGS(lock, ip)
);
#endif /* CONFIG_LITE_LOCKDEP */
#endif /* _TRACE_LITE_LOCK_H */
#include <trace/define_trace.h>
\ No newline at end of file
......@@ -139,6 +139,10 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
if (sysctl_hung_task_all_cpu_backtrace)
hung_task_show_all_bt = true;
#ifdef CONFIG_LITE_LOCKDEP
lite_debug_show_all_locks();
#endif
}
touch_nmi_watchdog();
......
......@@ -17,6 +17,7 @@ endif
obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
obj-$(CONFIG_LOCKDEP) += lockdep.o
obj-$(CONFIG_LITE_LOCKDEP) += lite_lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
endif
......
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/kallsyms.h>
#include <linux/nmi.h>
#include <linux/utsname.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/sysctl.h>
#define CREATE_TRACE_POINTS
#include <trace/events/lite_lock.h>
#ifdef CONFIG_LITE_LOCKDEP
int lite_lockdep = 1;
module_param(lite_lockdep, int, 0644);
#else
#define lite_lockdep 0
#endif
/*
* The hash-table for lite-lockdep classes:
*/
#define LITE_CLASSHASH_BITS (MAX_LITE_LOCKDEP_KEYS_BITS - 1)
#define LITE_CLASSHASH_SIZE (1UL << LITE_CLASSHASH_BITS)
#define __liteclasshashfn(key) hash_long((unsigned long)key, LITE_CLASSHASH_BITS)
#define liteclasshashentry(key) (lite_classhash_table + __liteclasshashfn((key)))
static struct hlist_head lite_classhash_table[LITE_CLASSHASH_SIZE];
#define LITE_KEYHASH_BITS (MAX_LITE_LOCKDEP_KEYS_BITS - 1)
#define LITE_KEYHASH_SIZE (1UL << LITE_KEYHASH_BITS)
static struct hlist_head lite_lock_keys_hash[LITE_KEYHASH_SIZE];
unsigned long nr_lite_lock_classes;
struct lite_lock_class lite_lock_classes[MAX_LITE_LOCKDEP_KEYS];
static DECLARE_BITMAP(lite_lock_classes_in_use, MAX_LITE_LOCKDEP_KEYS);
static LIST_HEAD(all_lite_lock_classes);
static LIST_HEAD(free_lite_lock_classes);
/*
 * lite_lockdep_lock: protects the lock-class hash table, the class
 * lists and other shared data structures.
 */
static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static struct task_struct *__owner;
struct lite_lock_class_key __lite_lockdep_no_validate__;
EXPORT_SYMBOL_GPL(__lite_lockdep_no_validate__);
static inline void lite_lockdep_lock(void)
{
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
arch_spin_lock(&__lock);
__owner = current;
}
static inline void lite_lockdep_unlock(void)
{
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
return;
__owner = NULL;
arch_spin_unlock(&__lock);
}
static int lite_graph_lock(void)
{
lite_lockdep_lock();
if (!debug_locks) {
lite_lockdep_unlock();
return 0;
}
return 1;
}
static inline void lite_graph_unlock(void)
{
lite_lockdep_unlock();
}
static inline int lite_debug_locks_off_graph_unlock(void)
{
int ret = debug_locks_off();
lite_lockdep_unlock();
return ret;
}
static inline
struct hlist_head *litekeyhashentry(const struct lock_class_key *key)
{
unsigned long hash = hash_long((uintptr_t)key, LITE_KEYHASH_BITS);
return lite_lock_keys_hash + hash;
}
/*
 * Check whether an address points to a static object; same as the
 * helper in lockdep.c.
 */
#ifdef __KERNEL__
static int static_obj(const void *obj)
{
unsigned long start = (unsigned long) &_stext,
end = (unsigned long) &_end,
addr = (unsigned long) obj;
if (arch_is_kernel_initmem_freed(addr))
return 0;
if ((addr >= start) && (addr < end))
return 1;
if (arch_is_kernel_data(addr))
return 1;
if (is_kernel_percpu_address(addr))
return 1;
return is_module_address(addr) || is_module_percpu_address(addr);
}
#endif
/* Check whether a key has been registered as a dynamic key,
* same as the one in lockdep.c.
*/
static bool is_dynamic_key(const struct lite_lock_class_key *key)
{
struct hlist_head *hash_head;
struct lite_lock_class_key *k;
bool found = false;
if (WARN_ON_ONCE(static_obj(key)))
return false;
if (!debug_locks)
return true;
hash_head = litekeyhashentry(key);
rcu_read_lock();
hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
if (k == key) {
found = true;
break;
}
}
rcu_read_unlock();
return found;
}
/*
 * Assign a lock key to a lock that was initialized without one; same as
 * the helper in lockdep.c.
 */
static bool assign_lite_lock_key(struct lite_lockdep_map *lock)
{
unsigned long can_addr, addr = (unsigned long)lock;
if (__is_kernel_percpu_address(addr, &can_addr))
lock->key = (void *)can_addr;
else if (__is_module_percpu_address(addr, &can_addr))
lock->key = (void *)can_addr;
else if (static_obj(lock))
lock->key = (void *)lock;
else {
debug_locks_off();
pr_err("INFO: trying to register non-static key.\n");
pr_err("you didn't initialize this object before use?\n");
pr_err("turning off the locking correctness validator.\n");
dump_stack();
return false;
}
return true;
}
static inline struct lite_lock_class *lite_hlock_class(struct lite_held_lock *hlock)
{
unsigned int class_idx = hlock->class_idx;
barrier();
if (!test_bit(class_idx, lite_lock_classes_in_use)) {
DEBUG_LOCKS_WARN_ON(1);
return NULL;
}
return lite_lock_classes + class_idx;
}
const char *__get_key_name(const struct lite_lock_class_sub_key *key, char *str)
{
return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}
static void lite_print_lock_name(struct lite_lock_class *class)
{
char str[KSYM_NAME_LEN];
const char *name;
name = class->name;
if (!name) {
name = __get_key_name(class->key, str);
printk(KERN_CONT "%s", name);
} else {
printk(KERN_CONT "%s", name);
}
}
static void lite_print_lock(struct lite_held_lock *hlock)
{
struct lite_lock_class *lock = lite_hlock_class(hlock);
if (!lock) {
printk(KERN_CONT "<RELEASED>\n");
return;
}
printk(KERN_CONT "%px", hlock->instance);
lite_print_lock_name(lock);
printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
}
void lite_lockdep_print_held_locks(struct task_struct *p)
{
int i, depth = READ_ONCE(p->lite_lockdep_depth);
if (!depth)
printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
else
printk("%d lock%s held by %s/%d:\n", depth,
depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
if (p->state == TASK_RUNNING && p != current)
return;
for (i = 0; i < depth; i++) {
printk(" #%d: ", i);
lite_print_lock(p->held_locks + i);
}
}
#ifdef __KERNEL__
void lite_debug_show_all_locks(void)
{
struct task_struct *g, *p;
if (unlikely(!debug_locks)) {
pr_warn("INFO: lite-lockdep is turned off.\n");
return;
}
pr_warn("\nShowing all locks held in the system:\n");
rcu_read_lock();
for_each_process_thread(g, p) {
if (!p->lite_lockdep_depth)
continue;
lite_lockdep_print_held_locks(p);
touch_nmi_watchdog();
touch_all_softlockup_watchdogs();
}
rcu_read_unlock();
pr_warn("\n");
pr_warn("=============================================\n\n");
}
EXPORT_SYMBOL_GPL(lite_debug_show_all_locks);
#endif
static void print_lite_kernel_ident(void)
{
printk("%s %.*s %s\n", init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version,
print_tainted());
}
static void init_data_structures_once(void)
{
static bool __read_mostly initialized;
int i;
if (likely(initialized))
return;
initialized = true;
for (i = 0; i < ARRAY_SIZE(lite_lock_classes); i++) {
list_add_tail(&lite_lock_classes[i].lock_entry, &free_lite_lock_classes);
}
}
static noinstr struct lite_lock_class *
look_up_lite_lock_class(const struct lite_lockdep_map *lock)
{
struct lite_lock_class_sub_key *key;
struct hlist_head *hash_head;
struct lite_lock_class *class;
if (unlikely(!lock->key))
return NULL;
key = lock->key->sub_key;
hash_head = liteclasshashentry(key);
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return NULL;
hlist_for_each_entry_rcu_notrace(class, hash_head, hash_entry) {
if (class->key == key) {
WARN_ON_ONCE(class->name != lock->name &&
lock->key != &__lite_lockdep_no_validate__);
return class;
}
}
return NULL;
}
/*
* Register a lock's class in the hash-table.
*/
static struct lite_lock_class *
register_lite_lock_class(struct lite_lockdep_map *lock)
{
struct lite_lock_class_sub_key *key;
struct hlist_head *hash_head;
struct lite_lock_class *class;
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
class = look_up_lite_lock_class(lock);
if (likely(class))
goto out_set_class;
if (!lock->key) {
if (!assign_lite_lock_key(lock))
return NULL;
} else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
return NULL;
}
key = lock->key->sub_key;
hash_head = liteclasshashentry(key);
if (!lite_graph_lock()) {
return NULL;
}
hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
if (class->key == key)
goto out_unlock_set;
}
init_data_structures_once();
class = list_first_entry_or_null(&free_lite_lock_classes, typeof(*class),
lock_entry);
if (!class) {
printk(KERN_DEBUG "BUG: MAX_LOCKDEP_KEYS too low!");
dump_stack();
return NULL;
}
nr_lite_lock_classes++;
__set_bit(class - lite_lock_classes, lite_lock_classes_in_use);
class->key = key;
class->name = lock->name;
hlist_add_head_rcu(&class->hash_entry, hash_head);
list_move_tail(&class->lock_entry, &all_lite_lock_classes);
out_unlock_set:
lite_graph_unlock();
out_set_class:
lock->class = class;
return class;
}
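/*
 * Record one lock acquisition: look up (or lazily register) the lock's
 * lite class and append a lite_held_lock entry to current->held_locks.
 */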
static int
__lite_lock_acquire(struct lite_lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
struct lite_lockdep_map *nest_lock, unsigned long ip,
int reacquire)
{
struct task_struct *curr = current;
struct lite_lock_class *class = NULL;
struct lite_held_lock *hlock;
unsigned int depth;
int class_idx;
if (unlikely(!debug_locks))
return 0;
if (!lite_lockdep)
return 0;
if (lock->key == &__lite_lockdep_no_validate__)
check = 0;
class = lock->class;
if (unlikely(!class)) {
class = register_lite_lock_class(lock);
if (!class)
return 0;
}
depth = curr->lite_lockdep_depth;
if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LITE_LOCK_DEPTH))
return 0;
class_idx = class - lite_lock_classes;
hlock = curr->held_locks + depth;
hlock->class_idx = class_idx;
hlock->subclass = subclass;
hlock->acquire_ip = ip;
hlock->instance = lock;
hlock->nest_lock = nest_lock;
hlock->trylock = trylock;
hlock->read = read;
hlock->check = check;
if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lite_lock_classes_in_use)))
return 0;
curr->lite_lockdep_depth++;
if (unlikely(curr->lite_lockdep_depth >= MAX_LITE_LOCK_DEPTH)) {
debug_locks_off();
printk(KERN_DEBUG "BUG: MAX_LOCK_DEPTH too low!");
printk(KERN_DEBUG "depth: %i max: %lu!\n",
curr->lite_lockdep_depth, MAX_LITE_LOCK_DEPTH);
lite_lockdep_print_held_locks(current);
lite_debug_show_all_locks();
dump_stack();
return 0;
}
return 1;
}
static noinstr int match_lite_held_lock(const struct lite_held_lock *hlock,
const struct lite_lockdep_map *lock)
{
if (hlock->instance == lock)
return 1;
return 0;
}
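/*
 * Find @lock among @curr's held locks, scanning from the most recent
 * acquisition downwards; the index of the match is returned via @idx.
 */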
static struct lite_held_lock *find_lite_held_lock(struct task_struct *curr,
struct lite_lockdep_map *lock,
unsigned int depth, int *idx)
{
struct lite_held_lock *ret, *hlock, *prev_hlock;
int i;
i = depth - 1;
hlock = curr->held_locks + i;
ret = hlock;
if (match_lite_held_lock(hlock, lock))
goto out;
ret = NULL;
for (i--, prev_hlock = hlock--;
i >= 0;
i--, prev_hlock = hlock--) {
if (match_lite_held_lock(hlock, lock)) {
ret = hlock;
break;
}
}
out:
*idx = i;
return ret;
}
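/*
 * Re-record the held locks above a released entry so the per-task
 * record stays contiguous. Returns 0 on success, non-zero if an entry
 * could not be re-recorded.
 */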
static int
lite_reacquire_held_locks(struct task_struct *curr, unsigned int depth, int idx)
{
struct lite_held_lock *hlock;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
switch (__lite_lock_acquire(hlock->instance,
hlock->subclass,
hlock->trylock,
hlock->read,
hlock->check,
hlock->nest_lock,
hlock->acquire_ip,
1)) {
case 0:
return 1;
case 1:
break;
default:
WARN_ON(1);
return 0;
}
}
return 0;
}
static void print_lite_lockdep_cache(struct lite_lockdep_map *lock)
{
const char *name;
char str[KSYM_NAME_LEN];
name = lock->name;
if (!name)
name = __get_key_name(lock->key->sub_key, str);
printk(KERN_CONT "%s", name);
}
static inline void print_lite_ip_sym(const char *loglvl, unsigned long ip)
{
printk("%s[<%px>] %pS\n", loglvl, (void *) ip, (void *) ip);
}
static void print_lite_unlock_imbalance_bug(struct task_struct *curr,
struct lite_lockdep_map *lock,
unsigned long ip)
{
pr_warn("\n");
pr_warn("=====================================\n");
pr_warn("WARNING: bad unlock balance detected!\n");
print_lite_kernel_ident();
pr_warn("-------------------------------------\n");
pr_warn("%s/%d is trying to release lock (",
curr->comm, task_pid_nr(curr));
print_lite_lockdep_cache(lock);
pr_cont(") at:\n");
print_lite_ip_sym(KERN_WARNING, ip);
pr_warn("but there are no more locks to release!\n");
pr_warn("\nother info that might help us debug this:\n");
lite_lockdep_print_held_locks(curr);
pr_warn("\nstack backtrace:\n");
dump_stack();
}
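/*
 * Remove @lock from current->held_locks; warns on unbalanced releases
 * and re-records any locks acquired after it.
 */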
static int
__lite_lock_release(struct lite_lockdep_map *lock, unsigned long ip)
{
struct task_struct *curr = current;
unsigned int depth = 1;
struct lite_held_lock *hlock;
int i;
if (unlikely(!debug_locks))
return 0;
if (!lite_lockdep)
return 0;
depth = curr->lite_lockdep_depth;
if (depth <= 0) {
print_lite_unlock_imbalance_bug(curr, lock, ip);
return 0;
}
hlock = find_lite_held_lock(curr, lock, depth, &i);
if (!hlock) {
print_lite_unlock_imbalance_bug(curr, lock, ip);
return 0;
}
curr->lite_lockdep_depth = i;
if (i == depth - 1)
return 1;
if (lite_reacquire_held_locks(curr, depth, i + 1))
return 0;
return 0;
}
void lite_lock_acquire(struct lite_lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
struct lite_lockdep_map *nest_lock, unsigned long ip)
{
unsigned long flags;
if (!debug_locks)
return;
raw_local_irq_save(flags);
trace_lock_acquire_lite(lock, subclass, trylock, read, check, nest_lock, ip);
__lite_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip, 0);
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lite_lock_acquire);
void lite_lock_release(struct lite_lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
trace_lock_release_lite(lock, ip);
raw_local_irq_save(flags);
__lite_lock_release(lock, ip);
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lite_lock_release);
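/*
 * Initialize a lite_lockdep_map with its name and key; the lite_lock_class
 * itself is registered lazily on the first acquisition.
 */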
void lite_lockdep_init_map_type(struct lite_lockdep_map *lock, const char *name,
struct lite_lock_class_key *key, int subclass)
{
lock->class = NULL;
if (DEBUG_LOCKS_WARN_ON(!name)) {
lock->name = "NULL";
return;
}
lock->name = name;
if (DEBUG_LOCKS_WARN_ON(!key))
return;
if (!static_obj(key) && !is_dynamic_key(key)) {
if (debug_locks)
printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
DEBUG_LOCKS_WARN_ON(1);
return;
}
lock->key = key;
if (unlikely(!debug_locks))
return;
}
EXPORT_SYMBOL_GPL(lite_lockdep_init_map_type);
......@@ -86,6 +86,9 @@ void debug_mutex_init(struct mutex *lock, const char *name,
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
#ifdef CONFIG_LITE_LOCKDEP
lite_lockdep_init_map(&lock->lite_dep_map, name, key, 0);
#endif
lock->magic = lock;
}
......
......@@ -152,7 +152,7 @@ static inline bool __mutex_trylock(struct mutex *lock)
return !__mutex_trylock_or_owner(lock);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
#if !defined(CONFIG_DEBUG_LOCK_ALLOC) && !defined(CONFIG_LITE_LOCKDEP)
/*
* Lockdep annotations are contained to the slow paths for simplicity.
* There is nothing that would stop spreading the lockdep annotations outwards
......@@ -256,7 +256,7 @@ static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
}
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
#if !defined(CONFIG_DEBUG_LOCK_ALLOC) && !defined(CONFIG_LITE_LOCKDEP)
/*
* We split the mutex lock/unlock logic into separate fastpath and
* slowpath functions, to reduce the register pressure on the fastpath.
......@@ -743,7 +743,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
*/
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
#if !defined(CONFIG_DEBUG_LOCK_ALLOC) && !defined(CONFIG_LITE_LOCKDEP)
if (__mutex_unlock_fast(lock))
return;
#endif
......@@ -965,7 +965,11 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
}
preempt_disable();
#ifdef CONFIG_LITE_LOCKDEP
lite_mutex_acquire_nest(&lock->lite_dep_map, subclass, 0, nest_lock, ip);
#else
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
#endif
if (__mutex_trylock(lock) ||
mutex_optimistic_spin(lock, ww_ctx, NULL)) {
......@@ -1097,7 +1101,11 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
err_early_kill:
spin_unlock(&lock->wait_lock);
debug_mutex_free_waiter(&waiter);
#ifdef CONFIG_LITE_LOCKDEP
lite_mutex_release(&lock->lite_dep_map, ip);
#else
mutex_release(&lock->dep_map, ip);
#endif
preempt_enable();
return ret;
}
......@@ -1117,7 +1125,8 @@ __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP)
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
......@@ -1231,7 +1240,11 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
DEFINE_WAKE_Q(wake_q);
unsigned long owner;
#ifdef CONFIG_LITE_LOCKDEP
lite_mutex_release(&lock->lite_dep_map, ip);
#else
mutex_release(&lock->dep_map, ip);
#endif
/*
* Release the lock before (potentially) taking the spinlock such that
......@@ -1286,7 +1299,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
wake_up_q(&wake_q);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
#if !defined(CONFIG_DEBUG_LOCK_ALLOC) && !defined(CONFIG_LITE_LOCKDEP)
/*
* Here come the less common (and hence less performance-critical) APIs:
* mutex_lock_interruptible() and mutex_trylock().
......@@ -1422,14 +1435,19 @@ int __sched mutex_trylock(struct mutex *lock)
#endif
locked = __mutex_trylock(lock);
#ifdef CONFIG_LITE_LOCKDEP
if (locked)
lite_mutex_acquire(&lock->lite_dep_map, 0, 1, _RET_IP_);
#else
if (locked)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
#endif
return locked;
}
EXPORT_SYMBOL(mutex_trylock);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
#if !defined(CONFIG_DEBUG_LOCK_ALLOC) && !defined(CONFIG_LITE_LOCKDEP)
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
......
......@@ -330,6 +330,9 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
#ifdef CONFIG_LITE_LOCKDEP
lite_lockdep_init_map(&sem->lite_dep_map, name, key, 0);
#endif
#ifdef CONFIG_DEBUG_RWSEMS
sem->magic = sem;
#endif
......@@ -1501,7 +1504,11 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
void __sched down_read(struct rw_semaphore *sem)
{
might_sleep();
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_acquire_read(&sem->lite_dep_map, 0, 0, _RET_IP_);
#else
rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
#endif
LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
......@@ -1510,10 +1517,18 @@ EXPORT_SYMBOL(down_read);
int __sched down_read_interruptible(struct rw_semaphore *sem)
{
might_sleep();
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_acquire_read(&sem->lite_dep_map, 0, 0, _RET_IP_);
#else
rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
#endif
if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_release(&sem->lite_dep_map, _RET_IP_);
#else
rwsem_release(&sem->dep_map, _RET_IP_);
#endif
return -EINTR;
}
......@@ -1524,10 +1539,18 @@ EXPORT_SYMBOL(down_read_interruptible);
int __sched down_read_killable(struct rw_semaphore *sem)
{
might_sleep();
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_acquire_read(&sem->lite_dep_map, 0, 0, _RET_IP_);
#else
rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
#endif
if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_release(&sem->lite_dep_map, _RET_IP_);
#else
rwsem_release(&sem->dep_map, _RET_IP_);
#endif
return -EINTR;
}
......@@ -1543,7 +1566,11 @@ int down_read_trylock(struct rw_semaphore *sem)
int ret = __down_read_trylock(sem);
if (ret == 1)
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_acquire_read(&sem->lite_dep_map, 0, 1, _RET_IP_);
#else
rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
#endif
return ret;
}
EXPORT_SYMBOL(down_read_trylock);
......@@ -1554,7 +1581,11 @@ EXPORT_SYMBOL(down_read_trylock);
void __sched down_write(struct rw_semaphore *sem)
{
might_sleep();
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_acquire(&sem->lite_dep_map, 0, 0, _RET_IP_);
#else
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
#endif
LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write);
......@@ -1565,11 +1596,19 @@ EXPORT_SYMBOL(down_write);
int __sched down_write_killable(struct rw_semaphore *sem)
{
might_sleep();
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_acquire(&sem->lite_dep_map, 0, 0, _RET_IP_);
#else
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
#endif
if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
__down_write_killable)) {
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_release(&sem->lite_dep_map, _RET_IP_);
#else
rwsem_release(&sem->dep_map, _RET_IP_);
#endif
return -EINTR;
}
......@@ -1585,7 +1624,11 @@ int down_write_trylock(struct rw_semaphore *sem)
int ret = __down_write_trylock(sem);
if (ret == 1)
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_acquire(&sem->lite_dep_map, 0, 1, _RET_IP_);
#else
rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
#endif
return ret;
}
......@@ -1596,7 +1639,11 @@ EXPORT_SYMBOL(down_write_trylock);
*/
void up_read(struct rw_semaphore *sem)
{
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_release(&sem->lite_dep_map, _RET_IP_);
#else
rwsem_release(&sem->dep_map, _RET_IP_);
#endif
__up_read(sem);
}
EXPORT_SYMBOL(up_read);
......@@ -1606,7 +1653,11 @@ EXPORT_SYMBOL(up_read);
*/
void up_write(struct rw_semaphore *sem)
{
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_release(&sem->lite_dep_map, _RET_IP_);
#else
rwsem_release(&sem->dep_map, _RET_IP_);
#endif
__up_write(sem);
}
EXPORT_SYMBOL(up_write);
......@@ -1621,12 +1672,17 @@ void downgrade_write(struct rw_semaphore *sem)
}
EXPORT_SYMBOL(downgrade_write);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP)
void down_read_nested(struct rw_semaphore *sem, int subclass)
{
might_sleep();
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_acquire_read(&sem->lite_dep_map, subclass, 0, _RET_IP_);
#else
rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
#endif
LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read_nested);
......@@ -1634,10 +1690,19 @@ EXPORT_SYMBOL(down_read_nested);
int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
{
might_sleep();
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_acquire_read(&sem->lite_dep_map, subclass, 0, _RET_IP_);
#else
rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
#endif
if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_release(&sem->lite_dep_map, _RET_IP_);
#else
rwsem_release(&sem->dep_map, _RET_IP_);
#endif
return -EINTR;
}
......@@ -1648,7 +1713,12 @@ EXPORT_SYMBOL(down_read_killable_nested);
void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
{
might_sleep();
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_acquire_nest(&sem->lite_dep_map, 0, 0, nest, _RET_IP_);
#else
rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
#endif
LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(_down_write_nest_lock);
......@@ -1664,7 +1734,12 @@ EXPORT_SYMBOL(down_read_non_owner);
void down_write_nested(struct rw_semaphore *sem, int subclass)
{
might_sleep();
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_acquire(&sem->lite_dep_map, subclass, 0, _RET_IP_);
#else
rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
#endif
LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write_nested);
......@@ -1672,11 +1747,20 @@ EXPORT_SYMBOL(down_write_nested);
int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
{
might_sleep();
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_acquire(&sem->lite_dep_map, subclass, 0, _RET_IP_);
#else
rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
#endif
if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
__down_write_killable)) {
#ifdef CONFIG_LITE_LOCKDEP
lite_rwsem_release(&sem->lite_dep_map, _RET_IP_);
#else
rwsem_release(&sem->dep_map, _RET_IP_);
#endif
return -EINTR;
}
......
......@@ -353,13 +353,18 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP)
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
preempt_disable();
#ifdef CONFIG_LITE_LOCKDEP
lite_spin_acquire(&lock->lite_dep_map, subclass, 0, _RET_IP_);
do_raw_spin_lock(lock);
#else
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#endif
}
EXPORT_SYMBOL(_raw_spin_lock_nested);
......@@ -370,9 +375,14 @@ unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
local_irq_save(flags);
preempt_disable();
#ifdef CONFIG_LITE_LOCKDEP
lite_spin_acquire(&lock->lite_dep_map, subclass, 0, _RET_IP_);
do_raw_spin_lock(lock);
#else
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
do_raw_spin_lock_flags, &flags);
#endif
return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);
......@@ -381,8 +391,13 @@ void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
struct lockdep_map *nest_lock)
{
preempt_disable();
#ifdef CONFIG_LITE_LOCKDEP
lite_spin_acquire_nest(&lock->lite_dep_map, 0, 0, nest_lock, _RET_IP_);
do_raw_spin_lock(lock);
#else
spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#endif
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);
......
......@@ -22,6 +22,9 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
#endif
#ifdef CONFIG_LITE_LOCKDEP
lite_lockdep_init_map(&lock->lite_dep_map, name, key, 0);
#endif
lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
lock->magic = SPINLOCK_MAGIC;
......
......@@ -20,6 +20,10 @@
#include <asm/switch_to.h>
#include <asm/tlb.h>
#ifdef CONFIG_LITE_LOCKDEP
#include <linux/lite_lockdep.h>
#endif
#include "../workqueue_internal.h"
#include "../../fs/io-wq.h"
#include "../smpboot.h"
......@@ -3540,7 +3544,11 @@ prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf
* do an early lockdep release here:
*/
rq_unpin_lock(rq, rf);
#ifdef CONFIG_LITE_LOCKDEP
lite_spin_release(&rq->lock.lite_dep_map, _THIS_IP_);
#else
spin_release(&rq->lock.dep_map, _THIS_IP_);
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
/* this is a valid case when another task releases the spinlock */
rq->lock.owner = next;
......@@ -3554,7 +3562,11 @@ static inline void finish_lock_switch(struct rq *rq)
* fix up the runqueue lock - which gets 'carried over' from
* prev into current:
*/
#ifdef CONFIG_LITE_LOCKDEP
lite_spin_acquire(&rq->lock.lite_dep_map, 0, 0, _THIS_IP_);
#else
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
#endif
raw_spin_unlock_irq(&rq->lock);
}
......
......@@ -8,7 +8,11 @@ void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
struct lock_class_key *key)
{
raw_spin_lock_init(&q->lock);
#ifdef CONFIG_LITE_LOCKDEP
lite_lockdep_set_class_and_name(&q->lock, key, name);
#else
lockdep_set_class_and_name(&q->lock, key, name);
#endif
INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_swait_queue_head);
......
......@@ -9,7 +9,11 @@
void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
spin_lock_init(&wq_head->lock);
#ifdef CONFIG_LITE_LOCKDEP
lite_lockdep_set_class_and_name(&wq_head->lock, key, name);
#else
lockdep_set_class_and_name(&wq_head->lock, key, name);
#endif
INIT_LIST_HEAD(&wq_head->head);
}
......
......@@ -95,6 +95,9 @@
#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_LOCK_STAT)
#include <linux/lockdep.h>
#endif
#ifdef CONFIG_LITE_LOCKDEP
#include <linux/lite_lockdep.h>
#endif
#ifdef CONFIG_CHR_DEV_SG
#include <scsi/sg.h>
#endif
......@@ -1901,6 +1904,15 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif
#ifdef CONFIG_LITE_LOCKDEP
{
.procname = "lite_lockdep",
.data = &lite_lockdep,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
#endif
{
.procname = "panic",
......
......@@ -1342,6 +1342,14 @@ config LOCKDEP
select KALLSYMS
select KALLSYMS_ALL
config LITE_LOCKDEP
bool "Lightweight deadlock detection"
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
select DEBUG_RWSEMS
default n
config LOCKDEP_SMALL
bool
......