/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
	return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

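/*
 * Execute the compare-and-delay (CAD) instruction: stall this CPU for a
 * short while if the lock word still contains the old value.
 */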
static inline void compare_and_delay(int *lock, int old)
{
	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}

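/*
 * Out-of-line slow path of arch_spin_lock(): spin on the lock word and
 * retry the compare-and-swap, yielding to the lock owner's CPU when it
 * does not appear to be running (or unconditionally when not on LPAR).
 */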
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count, first_diag;

	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* First iteration: check if the lock owner is running. */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

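/*
 * Same as arch_spin_lock_wait(), but interrupts are re-enabled from the
 * caller's saved flags while spinning and disabled again around each
 * acquisition attempt.
 */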
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count, first_diag;

	local_irq_restore(flags);
	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		/* Check if the lock owner is running. */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

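/*
 * Try to acquire the spinlock up to spin_retry times without yielding.
 * Returns 1 if the lock was obtained, 0 otherwise.
 */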
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	for (count = spin_retry; count > 0; count--) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return 1;
		} else if (MACHINE_HAS_CAD)
			compare_and_delay(&lp->lock, owner);
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

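/*
 * Slow path for acquiring an rwlock as a reader: wait until the writer
 * bit is clear, then increment the reader count with compare-and-swap.
 */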
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old < 0) {
			if (MACHINE_HAS_CAD)
				compare_and_delay(&rw->lock, old);
			continue;
		}
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

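/*
 * Try to acquire the rwlock as a reader up to spin_retry times.
 * Returns 1 on success, 0 otherwise.
 */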
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old < 0) {
			if (MACHINE_HAS_CAD)
				compare_and_delay(&rw->lock, old);
			continue;
		}
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

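/*
 * Slow path for acquiring the rwlock as a writer (z196 and newer):
 * spin, setting the writer bit whenever it is clear, until no readers
 * remain and the bit was taken from an otherwise unlocked word.
 */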
void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
	int count = spin_retry;
	int owner, old;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_mb();
		if (old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

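/*
 * Slow path for acquiring the rwlock as a writer (pre-z196): set the
 * writer bit with compare-and-swap and wait until all readers are gone.
 */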
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old, prev;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old >= 0 &&
		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

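/*
 * Try to acquire the rwlock as a writer up to spin_retry times.
 * Returns 1 on success, 0 otherwise.
 */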
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old) {
			if (MACHINE_HAS_CAD)
				compare_and_delay(&rw->lock, old);
			continue;
		}
		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

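/*
 * Relax while spinning on a lock held by @cpu (stored as the bitwise
 * complement of the CPU number): yield to the owner unless we run on
 * LPAR and the owner is known to be running.
 */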
void arch_lock_relax(int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);