/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <arch/chip.h>

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

int *__atomic_hashed_lock(volatile void *v)
{
	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
	/*
	 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
	 * Using mm works here because atomic_locks is page aligned.
	 */
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
				      (unsigned long)atomic_locks,
				      2, (ATOMIC_HASH_SHIFT + 2) - 1);
	return (int *)ptr;
}
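
/*
 * For illustration only (a sketch, not used anywhere): because
 * atomic_locks is page aligned, the "mm" bitfield merge above is
 * equivalent to plain array indexing with bits [3, 3 + ATOMIC_HASH_SHIFT)
 * of the address, assuming ATOMIC_HASH_SIZE == (1 << ATOMIC_HASH_SHIFT).
 */
static inline int *__atomic_hashed_lock_sketch(volatile void *v)
{
	unsigned long idx = ((unsigned long)v >> 3) & (ATOMIC_HASH_SIZE - 1);

	return &atomic_locks[idx];
}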

#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
	return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
}

void __atomic_fault_unlock(int *irqlock_word)
{
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);
	*irqlock_word = 0;
}

#endif /* CONFIG_SMP */

static inline int *__atomic_setup(volatile void *v)
{
	/* Issue a load to the target to bring it into cache. */
	*(volatile int *)v;
	return __atomic_hashed_lock(v);
}

int _atomic_xchg(int *v, int n)
{
	return __atomic32_xchg(v, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(int *v, int i)
{
	return __atomic32_xchg_add(v, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

int _atomic_xchg_add_unless(int *v, int a, int u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic32_xchg_add_unless(v, __atomic_setup(v), u, a).val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);

int _atomic_cmpxchg(int *v, int o, int n)
{
	return __atomic32_cmpxchg(v, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_or);

unsigned long _atomic_fetch_and(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_and((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_and);

unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_andn);

unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_xor);


long long _atomic64_xchg(long long *v, long long n)
{
	return __atomic64_xchg(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

long long _atomic64_xchg_add(long long *v, long long i)
{
	return __atomic64_xchg_add(v, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

long long _atomic64_cmpxchg(long long *v, long long o, long long n)
{
	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);

long long _atomic64_fetch_and(long long *v, long long n)
{
	return __atomic64_fetch_and(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_and);

long long _atomic64_fetch_or(long long *v, long long n)
{
	return __atomic64_fetch_or(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_or);

long long _atomic64_fetch_xor(long long *v, long long n)
{
	return __atomic64_fetch_xor(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_xor);

/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called.  The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space.  So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT.  Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
		panic("Bad address used for kernel atomic op: %p\n", addr);
	return (struct __get_user) { .err = -EFAULT };
}
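
/*
 * A minimal sketch (hypothetical helper, not part of this file) of how
 * a futex-style caller can fold a struct __get_user result into the
 * usual "value or -EFAULT" convention: .err is zero on success and
 * -EFAULT after __atomic_bad_address() has handled a user-space fault.
 */
static inline int __example_unwrap_result(struct __get_user g, int *out)
{
	if (g.err)
		return g.err;	/* -EFAULT on a faulting user address */
	*out = (int)g.val;	/* old value read by the atomic routine */
	return 0;
}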


void __init __init_atomic_per_cpu(void)
{
	/* Validate power-of-two and "bigger than cpus" assumption */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*
	 * On TILEPro we prefer to use a single hash-for-home
	 * page, since this means atomic operations are less
	 * likely to encounter a TLB fault and thus should
	 * in general perform faster.  You may wish to disable
	 * this in situations where few hash-for-home tiles
	 * are configured.
	 */
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
}
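
/*
 * Worked example of the constraints above (illustrative, assuming a
 * 64 KB PAGE_SIZE): the lock page holds at most 64 KB / sizeof(int) ==
 * 16384 locks, and the low-3-bit-excluded page offset takes
 * PAGE_SIZE >> 3 == 8192 distinct values, so any power-of-two
 * ATOMIC_HASH_SIZE between 8192 and 16384 satisfies every check in
 * __init_atomic_per_cpu().
 */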