/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) ((v)->counter = (i))

/*
 * The smp_mb() calls throughout reflect the Linux requirement that
 * any routine which updates memory and returns a value must provide
 * full memory barriers both before and after the operation.
 */
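
/*
 * Note: the TILE-Gx cmpexch/cmpexch4 instructions compare against the
 * value previously loaded into the SPR_CMPEXCH_VALUE special-purpose
 * register, which is why the cmpxchg routines below write "o" to that
 * SPR before issuing the instruction.
 */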

static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	int val;
	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
	smp_mb();  /* barrier for proper semantics */
	val = __insn_cmpexch4((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}

static inline int atomic_xchg(atomic_t *v, int n)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_exch4((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}
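
/*
 * Plain atomic_add() does not return a value, so per the rule above it
 * issues no barriers of its own; callers that need ordering must supply
 * it themselves (see the smp_mb__{before,after}_atomic_{inc,dec}()
 * macros at the end of this file for the inc/dec cases).
 */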

static inline void atomic_add(int i, atomic_t *v)
{
	__insn_fetchadd4((void *)&v->counter, i);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	int val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd4((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}
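
/*
 * Atomically add "a" to the counter unless its current value is "u";
 * return the value the counter held beforehand.  The generic wrappers
 * in <linux/atomic.h> build atomic_add_unless() and
 * atomic_inc_not_zero() on top of this, treating a return value other
 * than "u" as success.
 */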

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = atomic_cmpxchg(v, guess, guess + a);
	} while (guess != oldval);
	return oldval;
}

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)	{ (i) }
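
/*
 * 64-bit reads and writes of the counter are plain accesses; they rely
 * on naturally aligned 64-bit loads and stores being atomic on this
 * 64-bit platform.
 */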

#define atomic64_read(v)		((v)->counter)
#define atomic64_set(v, i) ((v)->counter = (i))

static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
	val = __insn_cmpexch((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}

static inline long atomic64_xchg(atomic64_t *v, long n)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_exch((void *)&v->counter, n);
	smp_mb();  /* barrier for proper semantics */
	return val;
}

static inline void atomic64_add(long i, atomic64_t *v)
{
	__insn_fetchadd((void *)&v->counter, i);
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long val;
	smp_mb();  /* barrier for proper semantics */
	val = __insn_fetchadd((void *)&v->counter, i) + i;
	barrier();  /* the "+ i" above will wait on memory */
	return val;
}
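
/*
 * Unlike __atomic_add_unless() above, this returns a boolean: non-zero
 * if the add was performed (i.e. the counter did not equal "u"), zero
 * otherwise.
 */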

static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long guess, oldval = v->counter;
	do {
		if (oldval == u)
			break;
		guess = oldval;
		oldval = atomic64_cmpxchg(v, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
}

#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

/* Atomic inc and dec don't implement barriers, so provide them if needed. */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
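
/*
 * For example, a caller that needs a full barrier ahead of a plain
 * increment (which, per the above, has none of its own) might write:
 *
 *	smp_mb__before_atomic_inc();
 *	atomic_inc(&counter);
 */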

/* Define this to indicate that cmpxchg is an efficient operation. */
#define __HAVE_ARCH_CMPXCHG

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */