#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

/* Flush all mappings for a given pcid and addr, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
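
/*
 * Illustrative sketch (not part of the original header): the wrappers above
 * are only usable when the CPU supports INVPCID, so a hypothetical caller
 * that wants to drop every non-global translation tagged with one PCID
 * would guard the call with the feature check.  The helper name and the
 * "example_pcid" parameter are made up for illustration.
 */
static inline void example_invpcid_drop_context(unsigned long example_pcid)
{
	if (static_cpu_has(X86_FEATURE_INVPCID))
		invpcid_flush_single_context(example_pcid);
}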

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	u64 new_tlb_gen;

	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	smp_mb__before_atomic();
	new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
	smp_mb__after_atomic();

	return new_tlb_gen;
}
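
/*
 * Illustrative sketch only: a caller that has just modified the paging
 * structures is expected to bump the generation first and only then look
 * at mm_cpumask(), relying on the barriers above for the ordering the
 * comment describes.  The helper name is made up; arch_tlbbatch_add_mm()
 * further down is the real in-file user of this pattern.
 */
static inline bool example_mm_needs_remote_flush(struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);			/* publish the new generation */
	return !cpumask_empty(mm_cpumask(mm));	/* read of mm_cpumask ordered after the bump */
}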

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 */
	struct mm_struct *loaded_mm;
	int state;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

static inline void cr4_toggle_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	cr4 ^= mask;
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
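
/*
 * Illustrative sketch only: code that wants to flip a CR4 feature bit on
 * the local CPU is expected to go through the shadow helpers above instead
 * of writing CR4 directly, so the cached copy stays in sync with the
 * hardware register.  X86_CR4_PGE is just an example bit; the helper name
 * is made up.
 */
static inline void example_set_global_pages(bool enable)
{
	if (enable)
		cr4_set_bits(X86_CR4_PGE);
	else
		cr4_clear_bits(X86_CR4_PGE);
}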

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot CPU.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change during a
	 * task switch and therefore we must not be preempted while we write CR3
	 * back:
	 */
	preempt_disable();
	native_write_cr3(__native_read_cr3());
	preempt_enable();
}

static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE))
		__flush_tlb_global();
	else
		__flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
};

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}
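
/*
 * Illustrative sketch (not part of the original header): after kernel page
 * table entries for a range have been rewritten, the stale translations are
 * flushed with flush_tlb_kernel_range(); a single user page is handled with
 * flush_tlb_page() as defined above.  The helper name and range arguments
 * are hypothetical.
 */
static inline void example_kernel_remap_done(unsigned long start, unsigned long end)
{
	/* ... kernel page table updates for [start, end) would happen here ... */
	flush_tlb_kernel_range(start, end);
}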

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
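
/*
 * Illustrative sketch only: the intended pattern for the batching API above
 * is to accumulate target CPUs with arch_tlbbatch_add_mm() while page table
 * entries are being torn down, and then issue one arch_tlbbatch_flush() at
 * the end instead of flushing per page.  The helper name is made up.
 */
static inline void example_batched_unmap_flush(struct arch_tlbflush_unmap_batch *batch,
					       struct mm_struct *mm)
{
	arch_tlbbatch_add_mm(batch, mm);	/* record which CPUs may hold stale entries */
	arch_tlbbatch_flush(batch);		/* one combined flush afterwards */
}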

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */