#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>
#include <generated/bounds.h>

#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)

struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			int count;
		};
	};
};
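
/*
 * The union above is what makes a lockless fast path possible: with
 * USE_CMPXCHG_LOCKREF, the spinlock and the count share one aligned
 * 64-bit word, so both can be updated together with a single cmpxchg.
 * A simplified, hedged sketch of such a fast path for an increment
 * (the real implementation lives in lib/lockref.c):
 *
 *	struct lockref old, new;
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *		new = old;
 *		new.count++;
 *		if (cmpxchg64(&lockref->lock_count, old.lock_count,
 *			      new.lock_count) == old.lock_count)
 *			return;		lock was never taken
 *		old.lock_count = READ_ONCE(lockref->lock_count);
 *	}
 *	spin_lock(&lockref->lock);	fall back to the locked slow path
 *	lockref->count++;
 *	spin_unlock(&lockref->lock);
 */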

/* Increment the reference count unconditionally. */
extern void lockref_get(struct lockref *);
/* Lockless decrement; returns the new count, or -1 if the count was
 * already zero/dead or the lockless update could not be done. */
extern int lockref_put_return(struct lockref *);
/* Increment unless the count is zero or dead; returns non-zero on success. */
extern int lockref_get_not_zero(struct lockref *);
/* Increment unless the count is zero; returns 0 with the spinlock held on failure. */
extern int lockref_get_or_lock(struct lockref *);
/* Decrement unless the count would hit zero; returns 0 with the spinlock held on failure. */
extern int lockref_put_or_lock(struct lockref *);
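
/*
 * A hypothetical usage sketch (struct and field names are invented for
 * illustration, not part of this API): embed the lockref in a
 * refcounted object and fall back to the embedded lock only when the
 * lockless paths cannot decide the outcome.
 *
 *	struct cache_entry {
 *		struct lockref ref;
 *	};
 *
 *	init:
 *		spin_lock_init(&entry->ref.lock);
 *		entry->ref.count = 1;
 *
 *	lookup:
 *		if (!lockref_get_not_zero(&entry->ref))
 *			return NULL;	count was zero, entry going away
 *
 *	release:
 *		if (lockref_put_or_lock(&entry->ref))
 *			return;		lockless decrement succeeded
 *		tear the entry down, then spin_unlock(&entry->ref.lock)
 */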

/* Mark the lockref dead (count goes negative); the spinlock must be held. */
extern void lockref_mark_dead(struct lockref *);
/* Increment unless the lockref has been marked dead; returns non-zero on success. */
extern int lockref_get_not_dead(struct lockref *);

/* Must be called under spinlock for reliable results */
static inline int __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}
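
/*
 * A hedged sketch of the dead-marking pattern (caller names are
 * hypothetical): the owner marks the lockref dead under the spinlock
 * during teardown, so concurrent lockless lookups that use
 * lockref_get_not_dead() fail cleanly instead of resurrecting the
 * object.
 *
 *	teardown:
 *		spin_lock(&entry->ref.lock);
 *		lockref_mark_dead(&entry->ref);
 *		spin_unlock(&entry->ref.lock);
 *		free the entry
 *
 *	lockless lookup:
 *		if (!lockref_get_not_dead(&entry->ref))
 *			return NULL;	entry is being torn down
 */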

#endif /* __LINUX_LOCKREF_H */