/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>	/* __atomic_add(), __atomic64_*(), ... primitives */
#include <asm/barrier.h>
#include <asm/cmpxchg.h>	/* xchg() used by atomic_xchg()/atomic64_xchg() */

/* Static initializer for an atomic_t, e.g. atomic_t v = ATOMIC_INIT(0); */
#define ATOMIC_INIT(i)  { (i) }

/*
 * Read the current value of @v with a plain 32-bit load ("l").
 * The asm volatile forces a real memory access on every call, so the
 * compiler cannot cache the counter in a register across reads.
 */
static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

/*
 * Set @v to @i with a plain 32-bit store ("st").  asm volatile keeps the
 * store from being elided or reordered away by the compiler.
 */
static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}
L
Linus Torvalds 已提交
36

37
static inline int atomic_add_return(int i, atomic_t *v)
L
Linus Torvalds 已提交
38
{
39
	return __atomic_add_barrier(i, &v->counter) + i;
L
Linus Torvalds 已提交
40
}
41

42 43
static inline int atomic_fetch_add(int i, atomic_t *v)
{
44
	return __atomic_add_barrier(i, &v->counter);
45 46
}

/*
 * Atomically add @i to @v; no return value, no barrier guarantee needed.
 * On z196 and newer, a compile-time constant that fits a signed byte
 * ([-128, 127]) can use the "add immediate" form (__atomic_add_const),
 * which avoids fetching the old value.
 */
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/* (i > -129) && (i < 128) == signed-byte immediate range */
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		__atomic_add_const(i, &v->counter);
		return;
	}
#endif
	__atomic_add(i, &v->counter);
}

/* Derived 32-bit operations, all expressed via the add primitives above. */
#define atomic_inc(_v)			atomic_add(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)
#define atomic_dec(_v)			atomic_sub(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)

/*
 * Generate atomic_<op>() (no return value) and atomic_fetch_<op>()
 * (returns the old value, full barrier) for a bitwise operation @op.
 */
#define ATOMIC_OPS(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__atomic_##op(i, &v->counter);					\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	return __atomic_##op##_barrier(i, &v->counter);			\
}

/* Instantiate atomic_{and,or,xor}() and atomic_fetch_{and,or,xor}(). */
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

/* Atomically exchange the counter with @new; returns the old value. */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/*
 * Atomic compare-and-swap: if @v == @old, set @v to @new.
 * Always returns the value @v had before the operation.
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return __atomic_cmpxchg(&v->counter, old, new);
}

/* Static initializer for an atomic64_t. */
#define ATOMIC64_INIT(i)  { (i) }

/*
 * Read the current value of @v with a plain 64-bit load ("lg").
 * asm volatile forces a real memory access on every call.
 */
static inline long atomic64_read(const atomic64_t *v)
{
	long c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

/*
 * Set @v to @i with a plain 64-bit store ("stg").
 */
static inline void atomic64_set(atomic64_t *v, long i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}
L
Linus Torvalds 已提交
107

108
static inline long atomic64_add_return(long i, atomic64_t *v)
L
Linus Torvalds 已提交
109
{
110
	return __atomic64_add_barrier(i, &v->counter) + i;
111 112
}

113
static inline long atomic64_fetch_add(long i, atomic64_t *v)
114
{
115
	return __atomic64_add_barrier(i, &v->counter);
116 117
}

/*
 * Atomically add @i to @v; no return value.  On z196 and newer, a
 * compile-time constant in the signed-byte range [-128, 127] can use
 * the "add immediate" form, mirroring the 32-bit atomic_add().
 */
static inline void atomic64_add(long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/* (i > -129) && (i < 128) == signed-byte immediate range */
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		__atomic64_add_const(i, &v->counter);
		return;
	}
#endif
	__atomic64_add(i, &v->counter);
}

/* Atomically exchange the 64-bit counter with @new; returns the old value. */
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/*
 * 64-bit atomic compare-and-swap: if @v == @old, set @v to @new.
 * Always returns the value @v had before the operation.
 */
static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
	return __atomic64_cmpxchg(&v->counter, old, new);
}

/*
 * Generate atomic64_<op>() (no return value) and atomic64_fetch_<op>()
 * (returns the old value, full barrier) for a bitwise operation @op.
 */
#define ATOMIC64_OPS(op)						\
static inline void atomic64_##op(long i, atomic64_t *v)			\
{									\
	__atomic64_##op(i, &v->counter);				\
}									\
static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
{									\
	return __atomic64_##op##_barrier(i, &v->counter);		\
}

/* Instantiate atomic64_{and,or,xor}() and atomic64_fetch_{and,or,xor}(). */
ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS
151

152
static inline long atomic64_dec_if_positive(atomic64_t *v)
153
{
154
	long c, old, dec;
155 156 157 158 159 160 161 162 163 164 165 166 167 168

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

/* Derived 64-bit operations, all expressed via the add primitives above. */
#define atomic64_inc(_v)		atomic64_add(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long)(_i), _v)
#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long)(_i), _v)
#define atomic64_dec(_v)		atomic64_sub(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#endif /* __ARCH_S390_ATOMIC__  */