/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb() 	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
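
/*
 * Illustrative sketch, not part of the original header: the typical shape
 * of a wmb() between filling a buffer the device will read and the store
 * that tells the device to look at it.  The buffer/doorbell parameters and
 * the helper name are hypothetical; real drivers would use the proper MMIO
 * accessors.
 */
static inline void example_post_to_device(unsigned int *buf,
					  volatile unsigned int *doorbell)
{
	buf[0] = 1;	/* descriptor the device will fetch */
	wmb();		/* make the store to buf visible before the kick */
	*doorbell = 1;	/* tell the device to go read it */
}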

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 * 	bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
		unsigned long size)
{
	unsigned long mask;

	asm ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
			:"r"(size),"r" (index)
			:"cc");
	return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec
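
/*
 * Illustrative sketch, not part of the original header: how the mask is
 * normally combined with the index, in the style of array_index_nospec()
 * from <linux/nospec.h>.  The helper name below is hypothetical.
 */
static inline unsigned long example_clamp_index(unsigned long index,
						unsigned long size)
{
	unsigned long mask = array_index_mask_nospec(index, size);

	/*
	 * mask is ~0UL when index < size and 0 otherwise, so an
	 * out-of-bounds index is forced to 0 rather than being used
	 * under speculation past the bounds check.
	 */
	return index & mask;
}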

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()

#ifdef CONFIG_X86_32
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
#else
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
#endif
#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

#endif

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()

#include <asm-generic/barrier.h>
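
/*
 * Illustrative sketch, not part of the original header: a minimal
 * producer/consumer pairing of smp_store_release()/smp_load_acquire(),
 * which <asm-generic/barrier.h> above provides on top of the __smp_*
 * definitions in this file.  The structure and field names below are
 * hypothetical.
 */
struct example_msg {
	unsigned long payload;
	int ready;
};

static inline void example_publish(struct example_msg *m, unsigned long val)
{
	m->payload = val;
	/* order the payload store before the ready flag becomes visible */
	smp_store_release(&m->ready, 1);
}

static inline int example_consume(struct example_msg *m, unsigned long *val)
{
	/* order the ready load before the payload load */
	if (!smp_load_acquire(&m->ready))
		return 0;
	*val = m->payload;
	return 1;
}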

#endif /* _ASM_X86_BARRIER_H */