#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

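/*
 * Payload handed to each remote CPU by the flush IPI.  flush_mm is the
 * address space being flushed; [flush_start, flush_end) bounds the
 * range.  flush_end == TLB_FLUSH_ALL requests a full flush, and a
 * flush_end of zero is widened by flush_tlb_func() to a single page at
 * flush_start.  Kernel-range flushes fill in only the range fields.
 */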
struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead we update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently.  Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, so in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are
 * no write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;
	if (!f->flush_end)
		f->flush_end = f->flush_start + PAGE_SIZE;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;
			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		leave_mm(smp_processor_id());
}

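/*
 * Hand the flush to every CPU in @cpumask via a function-call IPI;
 * each target runs flush_tlb_func() against the on-stack info.  The
 * final "1" (wait) to smp_call_function_many() keeps @info alive until
 * all targets have finished with it.  On SGI UV systems the hardware
 * broadcast assist unit is tried first, and uv_flush_tlb_others()
 * returns the subset of CPUs that still need a conventional IPI.
 */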
void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
								&info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

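/*
 * Flush all user mappings of the current task's mm: the local TLB is
 * flushed unconditionally, and any other CPU that has the mm loaded
 * (per mm_cpumask) is asked to do a full flush as well.
 */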
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	local_flush_tlb();
	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

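/*
 * Flush a user address range, falling back to a full local flush once
 * the range exceeds tlb_single_page_flush_ceiling pages.  As a
 * hypothetical example, a caller that just unmapped four pages of @mm
 * at @addr could use:
 *
 *	flush_tlb_mm_range(mm, addr, addr + 4 * PAGE_SIZE, VM_NONE);
 *
 * Four pages is well under the default ceiling of 33, so the range
 * would be flushed with four individual 'invlpg' operations.
 */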
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm)
		goto out;

	if (!current->mm) {
		leave_mm(smp_processor_id());
		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}

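/*
 * Flush one user page.  The remote side is passed flush_end == 0UL,
 * which flush_tlb_func() widens to exactly one page at @start before
 * walking the range.
 */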
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(start);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

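/*
 * IPI handler for a full flush on every CPU.  CPUs in lazy TLB mode
 * take the opportunity to drop the mm entirely via leave_mm(), which
 * stops further flush IPIs from being sent to them for that mm.
 */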
static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

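/*
 * Kernel-range flushes reuse flush_tlb_info but leave flush_mm unset:
 * kernel mappings are shared by every address space, so there is no mm
 * to match against and every CPU processes the range unconditionally.
 */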
static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance against a user-space task's flush; a bit conservative. */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;
		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}

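/*
 * debugfs plumbing for the flush ceiling.  The file is created under
 * arch_debugfs_dir; assuming debugfs is mounted at the usual
 * /sys/kernel/debug, the knob can be inspected and tuned from user
 * space like so:
 *
 *	cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *	echo 50 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */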
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);