#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

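/*
 * Per-CPU TLB state: the mm whose translations this CPU may be caching
 * and whether the CPU services flush IPIs for it (TLBSTATE_OK) or is
 * in lazy TLB mode.  Every CPU starts out running on init_mm.
 */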
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

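/*
 * Argument block passed to flush_tlb_func() by the IPI senders below.
 * flush_end has two special values: TLB_FLUSH_ALL requests a full
 * flush, and 0 denotes a single-page flush of flush_start.
 */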
struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently.  Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

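	/*
	 * The IPI may arrive after this CPU has switched to another mm;
	 * a flush for a foreign mm can simply be ignored.
	 */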
	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;
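	/*
	 * A flush_end of zero denotes a single-page flush; widen it to
	 * exactly one page so the range checks below work unchanged.
	 */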
	if (!f->flush_end)
		f->flush_end = f->flush_start + PAGE_SIZE;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;
			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		leave_mm(smp_processor_id());
}

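/*
 * Hand the flush to every CPU in @cpumask via a function-call IPI.  On
 * SGI UV systems the Broadcast Assist Unit can perform the shootdown
 * in hardware, so the mask is first filtered through
 * uv_flush_tlb_others() and only the leftover CPUs get an IPI.
 */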
void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
								&info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	local_flush_tlb();
	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
unsigned long tlb_single_page_flush_ceiling = 33;

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm)
		goto out;

	if (!current->mm) {
		leave_mm(smp_processor_id());
		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

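	/*
	 * Beyond tlb_single_page_flush_ceiling pages, a full flush is
	 * cheaper than flushing page by page; see the comment above
	 * tlb_single_page_flush_ceiling.
	 */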
	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
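	/*
	 * Remote CPUs mirror the local decision: if we fell back to a
	 * full flush locally, request a full flush remotely as well.
	 */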
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(start);
		else
			leave_mm(smp_processor_id());
	}

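	/*
	 * An end of 0UL tells the remote flush_tlb_func() that this is
	 * a single-page flush of @start.
	 */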
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

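/*
 * Runs on each CPU with interrupts disabled.  CPUs in lazy TLB mode
 * additionally leave the mm so they stop receiving flush IPIs for it.
 */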
static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

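/*
 * Kernel mappings are shared by all CPUs, so a kernel-range flush has
 * to run on every CPU.  The user-space ceiling is reused to decide
 * between a full flush and per-page invlpg.
 */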
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance as with a user space task's flush; a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;
		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}

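/*
 * Expose tlb_single_page_flush_ceiling via debugfs (typically
 * /sys/kernel/debug/x86/tlb_single_page_flush_ceiling, assuming the
 * usual debugfs mount point) so the full-flush cutoff can be tuned at
 * runtime.
 */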
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);