#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

#include <linux/bitops.h> /* for LOCK_PREFIX */

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 *       need to test for the feature in boot_cpu_data.
 */

extern void __xchg_wrong_size(void);

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define __xchg(x, ptr, size)						\
({									\
	__typeof(*(ptr)) __x = (x);					\
	switch (size) {							\
	case 1:								\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile("xchgb %0,%1"				\
			     : "=q" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	case 2:								\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile("xchgw %0,%1"				\
			     : "=r" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	case 4:								\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile("xchgl %0,%1"				\
			     : "=r" (__x), "+m" (*__ptr)		\
			     : "0" (__x)				\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__xchg_wrong_size();					\
	}								\
	__x;								\
})

#define xchg(ptr, v)							\
	__xchg((v), (ptr), sizeof(*ptr))
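
/*
 * Illustrative use only, with hypothetical names: xchg() atomically swaps
 * in a new value and returns the old one, e.g. to take ownership of a
 * flag word:
 *
 *	static unsigned long example_flag;
 *
 *	static int example_try_take(void)
 *	{
 *		return xchg(&example_flag, 1UL) == 0;
 *	}
 */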

/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  We need to have the reader
 * side to see the coherent 64bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
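
/*
 * Illustrative use only, with hypothetical names: set_64bit() stores a
 * 64-bit value atomically on a 32-bit CPU, e.g. when publishing a
 * timestamp that another CPU may read concurrently:
 *
 *	static u64 example_timestamp;
 *
 *	static void example_publish(u64 now)
 *	{
 *		set_64bit(&example_timestamp, now);
 *	}
 *
 * As noted at the top of this file, callers must first check that the
 * CPU supports cmpxchg8b (boot_cpu_data).
 */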

extern void __cmpxchg_wrong_size(void);

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case 1:								\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case 2:								\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case 4:								\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, old, new)						\
	__cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
#endif
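
/*
 * Illustrative use only, with hypothetical names: the usual lock-free
 * read-modify-write retry loop built on cmpxchg():
 *
 *	static unsigned int example_counter;
 *
 *	static void example_add(unsigned int delta)
 *	{
 *		unsigned int old, new;
 *
 *		do {
 *			old = example_counter;
 *			new = old + delta;
 *		} while (cmpxchg(&example_counter, old, new) != old);
 *	}
 */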

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be necessary to
 * simulate the cmpxchg on the 80386 CPU. For that purpose we define
 * a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#endif

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 or 80486.  It may be
 * necessary
 * to simulate the cmpxchg8b on the 80386 and 80486 CPU.
 */

#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)" ,		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })


#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "cmpxchg8b (%%esi)" ,			\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })

#endif

#define cmpxchg8b(ptr, o1, o2, n1, n2)				\
({								\
	char __ret;						\
	__typeof__(o2) __dummy;					\
	__typeof__(*(ptr)) __old1 = (o1);			\
	__typeof__(o2) __old2 = (o2);				\
	__typeof__(*(ptr)) __new1 = (n1);			\
	__typeof__(o2) __new2 = (n2);				\
	asm volatile(LOCK_PREFIX "cmpxchg8b %2; setz %1"	\
		       : "=d"(__dummy), "=a" (__ret), "+m" (*ptr)\
		       : "a" (__old1), "d"(__old2),		\
		         "b" (__new1), "c" (__new2)		\
		       : "memory");				\
	__ret; })


#define cmpxchg8b_local(ptr, o1, o2, n1, n2)			\
({								\
	char __ret;						\
	__typeof__(o2) __dummy;					\
	__typeof__(*(ptr)) __old1 = (o1);			\
	__typeof__(o2) __old2 = (o2);				\
	__typeof__(*(ptr)) __new1 = (n1);			\
	__typeof__(o2) __new2 = (n2);				\
	asm volatile("cmpxchg8b %2; setz %1"			\
		       : "=d"(__dummy), "=a"(__ret), "+m" (*ptr)\
		       : "a" (__old), "d"(__old2),		\
		         "b" (__new1), "c" (__new2),		\
		       : "memory");				\
	__ret; })


#define cmpxchg_double(ptr, o1, o2, n1, n2)				\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 4);				\
	VM_BUG_ON((unsigned long)(ptr) % 8);				\
	cmpxchg8b((ptr), (o1), (o2), (n1), (n2));			\
})

#define cmpxchg_double_local(ptr, o1, o2, n1, n2)			\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 4);				\
	VM_BUG_ON((unsigned long)(ptr) % 8);				\
	cmpxchg8b_local((ptr), (o1), (o2), (n1), (n2));			\
})

#define system_has_cmpxchg_double() cpu_has_cx8
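
/*
 * Illustrative use only, with hypothetical names: cmpxchg_double() swaps
 * two adjacent, 8-byte-aligned 32-bit words (for example a pointer plus a
 * generation counter) in one atomic cmpxchg8b.  Callers should check
 * system_has_cmpxchg_double() first:
 *
 *	struct example_pair {
 *		void *ptr;
 *		unsigned long gen;
 *	} __attribute__((aligned(8)));
 *
 *	static int example_update(struct example_pair *p, void *old_ptr,
 *				  unsigned long old_gen, void *new_ptr)
 *	{
 *		return cmpxchg_double(&p->ptr, old_ptr, old_gen,
 *				      new_ptr, old_gen + 1);
 *	}
 */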

#endif /* _ASM_X86_CMPXCHG_32_H */