#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
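/*
 * With CONFIG_PARAVIRT the paravirt layer provides its own
 * paravirt_activate_mm(); on bare metal the hook is a no-op.
 */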
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

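/*
 * Enable user-space RDPMC (CR4.PCE) only for mms that are allowed to
 * use it, so performance counters are not readable by tasks that
 * should not see them.
 */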
static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_true(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

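/*
 * Called when switching to a kernel thread, which just borrows the
 * previous mm: mark this CPU's TLB state as lazy.  A later flush IPI
 * will then switch this CPU to init_mm (leave_mm()) instead of
 * repeatedly flushing a context that is not being used.
 */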
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}

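/*
 * Switch the CPU to a new address space.  When prev != next this
 * reloads CR3 (implicitly flushing non-global TLB entries), updates
 * CR4.PCE and, in the rare case that the LDTs differ, the LDT.  When
 * prev == next we may still need to recover from lazy TLB mode if a
 * flush IPI was skipped while this CPU was lazy.
 */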
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		load_cr3(next->pgd);
		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load per-mm CR4 state */
		load_mm_cr4(next);

		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible that prev->context.ldt doesn't match
		 * the LDT register.  This can happen if leave_mm(prev)
		 * was called and then modify_ldt changed
		 * prev->context.ldt but suppressed an IPI to this CPU.
		 * In this case, prev->context.ldt != NULL, because we
		 * never free an LDT while the mm still exists.  That
		 * means that next->context.ldt != prev->context.ldt,
		 * because mms never share an LDT.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	  else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here. Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));
			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_mm_cr4(next);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}

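/*
 * activate_mm() is the exec()-time counterpart of switch_mm(): tell
 * the paravirt layer about the new mm, then switch to it.
 */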
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

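/*
 * On exec, clear the user segment registers that could still hold
 * selectors referring to the outgoing mm's LDT: %gs on 32-bit
 * (lazily), %gs and %fs on 64-bit.
 */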
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

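/*
 * fork()/exit_mmap() hooks: on x86 these only forward to the
 * paravirt layer and are no-ops on bare metal.
 */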
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

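/*
 * Does this mm run 64-bit code?  Without IA-32 emulation every mm on
 * a 64-bit kernel does; otherwise the ia32_compat flag recorded at
 * exec time decides (x32 counts as 64-bit here).
 */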
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!config_enabled(CONFIG_IA32_EMULATION) ||
		mm->context.ia32_compat != TIF_IA32;
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

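/*
 * Called while exec() sets up the new mm: gives MPX a chance to
 * initialize its per-mm bounds directory state.
 */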
static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

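/*
 * munmap() hook: the only arch-specific work is telling MPX that
 * part of the address space has gone away.
 */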
static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#endif /* _ASM_X86_MMU_CONTEXT_H */