/*
 *  arch/s390/lib/spinlock.c
 *    Out of line spinlock code.
 *
 *    Copyright (C) IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/io.h>

int spin_retry = 1000;

/**
 * spin_retry= parameter: number of lock acquisition attempts before
 * the cpu is yielded.
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

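/*
 * Undirected yield: give up the remaining time slice of the virtual CPU
 * via diagnose 0x44, if the machine supports it.
 */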
static inline void _raw_yield(void)
{
	if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

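/*
 * Directed yield: ask the hypervisor to run the given (logical) CPU via
 * diagnose 0x9c, falling back to an undirected yield if unavailable.
 */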
static inline void _raw_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)
		asm volatile("diag %0,0,0x9c"
			     : : "d" (__cpu_logical_map[cpu]));
	else
		_raw_yield();
}

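/*
 * Slow path of arch_spin_lock(): spin until the lock is free, yielding
 * to the current owner every spin_retry iterations.  owner_cpu holds
 * the one's complement of the owning CPU number, so 0 means "unlocked"
 * while CPU 0 remains representable.
 */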
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();

	while (1) {
		if (count-- <= 0) {
			unsigned int owner = lp->owner_cpu;
			if (owner != 0)
				_raw_yield_cpu(~owner);
			count = spin_retry;
		}
		if (arch_spin_is_locked(lp))
			continue;
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

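/*
 * Same as arch_spin_lock_wait(), but interrupts are re-enabled (to the
 * state saved in flags) while spinning and disabled again before the
 * lock is actually taken.
 */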
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			unsigned int owner = lp->owner_cpu;
			if (owner != 0)
				_raw_yield_cpu(~owner);
			count = spin_retry;
		}
		if (arch_spin_is_locked(lp))
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

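/*
 * Bounded trylock: up to spin_retry attempts to grab the lock without
 * yielding.  Returns 1 on success, 0 otherwise.
 */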
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = ~smp_processor_id();
	int count;

	for (count = spin_retry; count > 0; count--) {
		if (arch_spin_is_locked(lp))
			continue;
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

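/*
 * Give the CPU that currently owns the lock a chance to run and
 * release it.
 */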
void arch_spin_relax(arch_spinlock_t *lock)
{
	unsigned int cpu = lock->owner_cpu;
	if (cpu != 0)
		_raw_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_spin_relax);

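/*
 * Slow path of read_lock(): the lower 31 bits of the lock word hold the
 * reader count, the most significant bit is the writer bit.  Spin until
 * no writer holds the lock, then increment the reader count atomically.
 */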
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			_raw_yield();
			count = spin_retry;
		}
		if (!__raw_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

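/*
 * Same as _raw_read_lock_wait(), but interrupts are re-enabled while
 * spinning and disabled again before the reader count is incremented.
 */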
void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			_raw_yield();
			count = spin_retry;
		}
		if (!__raw_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);

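/*
 * Bounded read trylock: up to spin_retry attempts to increment the
 * reader count.  Returns 1 on success, 0 otherwise.
 */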
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		if (!__raw_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

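/*
 * Slow path of write_lock(): spin until the lock word is zero (no
 * readers, no writer), then set the writer bit atomically.
 */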
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			_raw_yield();
			count = spin_retry;
		}
		if (!__raw_write_can_lock(rw))
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

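/*
 * Same as _raw_write_lock_wait(), but interrupts are re-enabled while
 * spinning and disabled again before the writer bit is set.
 */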
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			_raw_yield();
			count = spin_retry;
		}
		if (!__raw_write_can_lock(rw))
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);

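/*
 * Bounded write trylock: up to spin_retry attempts to set the writer
 * bit.  Returns 1 on success, 0 otherwise.
 */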
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;

	while (count-- > 0) {
		if (!__raw_write_can_lock(rw))
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);