/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <arch/chip.h>

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

int *__atomic_hashed_lock(volatile void *v)
{
	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
	/*
	 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
	 * Using mm works here because atomic_locks is page aligned.
	 */
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
				      (unsigned long)atomic_locks,
				      2, (ATOMIC_HASH_SHIFT + 2) - 1);
	return (int *)ptr;
}
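
/*
 * Illustrative sketch, not used by the kernel: assuming
 * ATOMIC_HASH_SIZE == 1 << ATOMIC_HASH_SHIFT (consistent with the
 * power-of-two check in __init_atomic_per_cpu()), the mm instruction
 * above is equivalent to indexing the lock table directly:
 *
 *	unsigned long idx = ((unsigned long)v >> 3) &
 *			    (ATOMIC_HASH_SIZE - 1);
 *	int *lock = &atomic_locks[idx];
 */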

#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
	return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
}

void __atomic_fault_unlock(int *irqlock_word)
{
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);
	*irqlock_word = 0;
}

#endif /* CONFIG_SMP */

static inline int *__atomic_setup(volatile void *v)
{
	/* Issue a load to the target to bring it into cache. */
	*(volatile int *)v;
	return __atomic_hashed_lock(v);
}

int _atomic_xchg(int *v, int n)
{
	return __atomic_xchg(v, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(int *v, int i)
{
	return __atomic_xchg_add(v, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

int _atomic_xchg_add_unless(int *v, int a, int u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic_xchg_add_unless(v, __atomic_setup(v), u, a).val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);
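
/*
 * Semantics sketch for the routine above (illustrative only; the
 * actual read-modify-write runs under the hashed lock inside the
 * assembly helper):
 *
 *	old = *v;
 *	if (old != u)
 *		*v = old + a;
 *	return old;
 */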

int _atomic_cmpxchg(int *v, int o, int n)
{
	return __atomic_cmpxchg(v, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_or);

unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_andn);

unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_xor);


u64 _atomic64_xchg(u64 *v, u64 n)
{
	return __atomic64_xchg(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

u64 _atomic64_xchg_add(u64 *v, u64 i)
{
	return __atomic64_xchg_add(v, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n)
{
	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);


/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called.  The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space.  So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT.  Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
		panic("Bad address used for kernel atomic op: %p\n", addr);
	return (struct __get_user) { .err = -EFAULT };
}
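
/*
 * Hypothetical caller sketch (illustrative; the real callers are the
 * fixup paths of the assembly routines):
 *
 *	struct __get_user ret = __atomic_bad_address(uaddr);
 *	// kernel address: access_ok() failed and we already panicked
 *	// user address:   ret.err == -EFAULT, handed back through the
 *	//                 futex layer to userspace
 */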


void __init __init_atomic_per_cpu(void)
{
	/* Validate power-of-two and "bigger than cpus" assumption */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*
	 * On TILEPro we prefer to use a single hash-for-home
	 * page, since this means atomic operations are less
	 * likely to encounter a TLB fault and thus should
	 * in general perform faster.  You may wish to disable
	 * this in situations where few hash-for-home tiles
	 * are configured.
	 */
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
}
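
/*
 * Worked sizing example (hedged; assumes the 64 KB page size that is
 * the TILEPro default): dropping the low 3 bits of a page offset
 * yields at most 65536 / 8 = 8192 distinct indices, so the final
 * BUILD_BUG_ON above guarantees ATOMIC_HASH_SIZE covers them all,
 * while the earlier check keeps the lock words within one page.
 */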