#ifndef _ASM_GENERIC_RWSEM_H
#define _ASM_GENERIC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

/*
 * the semaphore definition
 */
#ifdef CONFIG_64BIT
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
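
/*
 * The RWSEM_ACTIVE_MASK bits of ->count track active lockers, and
 * RWSEM_WAITING_BIAS drives the count negative once a writer or a
 * queued waiter shows up.  A few illustrative 64-bit states:
 *
 *	0x0000000000000000	unlocked, queue empty
 *	0x0000000000000001	one reader active, queue empty
 *	0x0000000000000003	three readers active, queue empty
 *	0xffffffff00000001	one writer active, queue empty
 *				(or one reader active with waiters queued)
 *	0xffffffff00000000	nothing active, waiters queued
 */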

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	/*
	 * Fastpath: bump the active count.  A non-positive result means
	 * a writer holds the lock or waiters are queued, so fall back
	 * to the slowpath.
	 */
	if (unlikely(atomic_long_inc_return_acquire((atomic_long_t *)&sem->count) <= 0))
		rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * Keep retrying while the count is non-negative (no writer
	 * active, nobody queued); the cmpxchg only succeeds if the
	 * count is still the value we sampled.
	 */
	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg_acquire(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}
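
/*
 * Caller-side sketch of the trylock path as exposed through
 * <linux/rwsem.h> (my_sem is a hypothetical rw_semaphore):
 *
 *	if (down_read_trylock(&my_sem)) {
 *		... read-side critical section, entered without sleeping ...
 *		up_read(&my_sem);
 *	}
 */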

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * Fastpath: add the write bias.  Anything other than a clean
	 * transition from unlocked means contention, so queue up.
	 */
	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
				     (atomic_long_t *)&sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
				     (atomic_long_t *)&sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
			return -EINTR;
	return 0;
}
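
/*
 * Sketch of the killable variant from the caller's side, going
 * through down_write_killable() in <linux/rwsem.h> (my_sem is
 * hypothetical):
 *
 *	if (down_write_killable(&my_sem))
 *		return -EINTR;		(a fatal signal ended the wait)
 *	... exclusive critical section ...
 *	up_write(&my_sem);
 */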

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_dec_return_release((atomic_long_t *)&sem->count);
	/*
	 * Wake the queue head only if this was the last active locker
	 * and the waiting bias is set (count dropped below -1).
	 */
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
				 (atomic_long_t *)&sem->count) < 0))
		rwsem_wake(sem);
}
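
/*
 * Worked example for the release above: write-locked with waiters is
 * RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS, so subtracting the
 * write bias leaves RWSEM_WAITING_BIAS (< 0) and we wake the queue
 * head; with nobody queued it leaves RWSEM_UNLOCKED_VALUE and the
 * unlock needs no slowpath work.
 */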

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	atomic_long_add(delta, (atomic_long_t *)&sem->count);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS,
				     (atomic_long_t *)&sem->count);
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}
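
/*
 * Arithmetic behind the downgrade: an uncontended writer holds
 * RWSEM_ACTIVE_WRITE_BIAS, and adding -RWSEM_WAITING_BIAS turns that
 * into RWSEM_ACTIVE_READ_BIAS, i.e. one active reader.  With waiters
 * queued the sum stays negative, and rwsem_downgrade_wake() can admit
 * the readers parked behind us.
 */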

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}
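
/*
 * Illustrative end-to-end usage through the <linux/rwsem.h> wrappers
 * that land in these fastpaths (my_sem is hypothetical):
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);
 *	... shared section ...
 *	up_read(&my_sem);
 *
 *	down_write(&my_sem);
 *	... exclusive section ...
 *	downgrade_write(&my_sem);	(keep it, but shared from here on)
 *	up_read(&my_sem);
 */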

#endif	/* __KERNEL__ */
#endif	/* _ASM_GENERIC_RWSEM_H */