#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out-of-order stores; wmb() ceases to be
 * a nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()
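
/*
 * Usage sketch (illustrative only; "desc", its fields and OWN_DEVICE are
 * hypothetical): a driver publishing a DMA descriptor uses dma_wmb() to
 * order the payload stores before the store that hands the descriptor to
 * the device:
 *
 *	desc->addr = buf_dma;
 *	desc->len  = buf_len;
 *	dma_wmb();
 *	desc->own  = OWN_DEVICE;
 *
 * and pairs it with a dma_rmb() between reading the ownership/status word
 * of a completed descriptor and reading its other fields.
 */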

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	dma_rmb()
#define smp_wmb()	barrier()
#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */
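
/*
 * Usage sketch (illustrative only; "condition" stands for whatever the
 * waiter is checking): smp_store_mb() is intended for the classic
 * store-then-test handshake, e.g. preparing to sleep:
 *
 *	smp_store_mb(current->state, TASK_INTERRUPTIBLE);
 *	if (!condition)
 *		schedule();
 *
 * On SMP the implied xchg() orders the state store before the condition
 * load; on UP the compiler barrier is sufficient.
 */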

#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 does not provide the strong TSO memory model,
 * so we must fall back to full barriers.
 */

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	smp_mb();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	ACCESS_ONCE(*p) = (v);						\
} while (0)

#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif
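
/*
 * Usage sketch (illustrative only; "data", "ready", compute() and
 * consume() are hypothetical): a writer publishes data with
 *
 *	data = compute();
 *	smp_store_release(&ready, 1);
 *
 * and a reader that observes ready != 0 via
 *
 *	if (smp_load_acquire(&ready))
 *		consume(data);
 *
 * is guaranteed to see the writer's data.  On regular TSO x86 both macros
 * reduce to a plain access plus a compiler barrier(); only the
 * CONFIG_X86_PPRO_FENCE case above needs the full smp_mb().
 */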

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()
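
/*
 * Usage sketch (illustrative only; "pending" and wake_up_waiter() are
 * hypothetical): these pair with non-value-returning atomics when a full
 * barrier is wanted around them:
 *
 *	atomic_inc(&pending);
 *	smp_mb__after_atomic();
 *	wake_up_waiter();
 *
 * Since every locked RMW instruction on x86 already acts as a full
 * barrier, a compiler barrier is all that is needed here.
 */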

/*
 * Stop RDTSC speculation.  This is needed when you need RDTSC (or
 * get_cycles() or a vread that may access the TSC) to be executed
 * within a defined code region.
 */
static __always_inline void rdtsc_barrier(void)
{
	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
			  "lfence", X86_FEATURE_LFENCE_RDTSC);
}
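
/*
 * Usage sketch (illustrative only; "t0"/"t1" and the timed work are
 * hypothetical): placing rdtsc_barrier() before each TSC read keeps the
 * read from being speculated into or out of the timed region:
 *
 *	rdtsc_barrier();
 *	t0 = get_cycles();
 *	... work being measured ...
 *	rdtsc_barrier();
 *	t1 = get_cycles();
 */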

#endif /* _ASM_X86_BARRIER_H */