/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

/* TLB refill handler generator, defined in tlbex.c. */
extern void build_tlb_refill_handler(void);

29
/*
30 31
 * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb,
 * unfortunately, itlb is not totally transparent to software.
32
 */
33 34 35 36
static inline void flush_itlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
37
	case CPU_LOONGSON3:
38 39 40 41 42 43
		write_c0_diag(4);
		break;
	default:
		break;
	}
}
44

45 46 47 48 49
static inline void flush_itlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_itlb();
}
50

L
Linus Torvalds 已提交
51 52 53 54
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
L
Leonid Yegoshin 已提交
55
	int entry, ftlbhighset;
L
Linus Torvalds 已提交
56

R
Ralf Baechle 已提交
57
	local_irq_save(flags);
L
Linus Torvalds 已提交
58 59 60 61 62 63 64 65
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
L
Leonid Yegoshin 已提交
66 67 68 69 70 71 72 73 74 75 76 77 78 79 80
	if (cpu_has_tlbinv) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
81 82 83 84 85 86 87 88 89
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
L
Linus Torvalds 已提交
90 91 92
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
93
	flush_itlb();
R
Ralf Baechle 已提交
94
	local_irq_restore(flags);
L
Linus Torvalds 已提交
95
}
96
EXPORT_SYMBOL(local_flush_tlb_all);
L
Linus Torvalds 已提交
97

/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	/* Only drop the context if this CPU ever used this mm's ASID. */
	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
G
Greg Ungerer 已提交
122
		unsigned long size, flags;
L
Linus Torvalds 已提交
123

R
Ralf Baechle 已提交
124
		local_irq_save(flags);
125 126 127
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
L
Leonid Yegoshin 已提交
128 129 130
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
L
Linus Torvalds 已提交
131 132 133 134 135 136 137
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
138
				start += (PAGE_SIZE << 1);
L
Linus Torvalds 已提交
139 140
				mtc0_tlbw_hazard();
				tlb_probe();
141
				tlb_probe_hazard();
L
Linus Torvalds 已提交
142 143 144 145 146 147
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
148
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
L
Linus Torvalds 已提交
149 150 151 152 153 154 155 156
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
157
		flush_itlb();
R
Ralf Baechle 已提交
158
		local_irq_restore(flags);
L
Linus Torvalds 已提交
159 160 161 162 163
	}
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
G
Greg Ungerer 已提交
164
	unsigned long size, flags;
L
Linus Torvalds 已提交
165

R
Ralf Baechle 已提交
166
	local_irq_save(flags);
L
Linus Torvalds 已提交
167 168
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
L
Leonid Yegoshin 已提交
169 170 171
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
L
Linus Torvalds 已提交
172 173 174 175 176 177 178 179 180 181 182 183 184
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
185
			tlb_probe_hazard();
L
Linus Torvalds 已提交
186 187 188 189 190 191
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
192
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
L
Linus Torvalds 已提交
193 194 195 196 197 198 199 200
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
201
	flush_itlb();
R
Ralf Baechle 已提交
202
	local_irq_restore(flags);
L
Linus Torvalds 已提交
203 204 205 206 207 208 209 210 211 212 213 214
}

/*
 * Flush the single TLB entry mapping @page in @vma's address space on
 * the local CPU.  Probes for the entry under the mm's ASID and, if
 * found, overwrites it with a unique non-matching VPN2.
 */
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		/* Align to the even/odd page pair the entry maps. */
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;	/* no matching entry */
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		flush_itlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

R
Ralf Baechle 已提交
248
	local_irq_save(flags);
L
Linus Torvalds 已提交
249
	oldpid = read_c0_entryhi();
250
	page &= (PAGE_MASK << 1);
L
Linus Torvalds 已提交
251 252 253
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
254
	tlb_probe_hazard();
L
Linus Torvalds 已提交
255 256 257 258 259
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
260
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
L
Linus Torvalds 已提交
261 262 263 264 265
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
266
	flush_itlb();
R
Ralf Baechle 已提交
267
	local_irq_restore(flags);
L
Linus Torvalds 已提交
268 269 270 271 272 273 274 275 276 277 278
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the needy.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	/* One TLB entry maps an even/odd page pair. */
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		/* Second half of the huge page: PFN advances by
		   HPAGE_SIZE >> 7 in EntryLo format. */
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
		/* 64-bit physical addresses on a 32-bit CPU: the high
		   word of the pte holds the EntryLo value. */
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	flush_itlb_vm(vma);
	local_irq_restore(flags);
}

343 344
/*
 * Install a permanent (wired) TLB entry.  The wired count is bumped so
 * the entry is never replaced by random TLB writes or flushed by
 * local_flush_tlb_all().
 */
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
}

374 375 376 377 378 379 380
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Probe whether the TLB supports huge pages: write PM_HUGE_MASK to
 * c0_pagemask and check that it reads back unchanged (unsupported
 * mask bits read back as zero).
 */
int __init has_transparent_hugepage(void)
{
	unsigned int mask;
	unsigned long flags;

	local_irq_save(flags);
	write_c0_pagemask(PM_HUGE_MASK);
	back_to_back_c0_hazard();
	mask = read_c0_pagemask();
	write_c0_pagemask(PM_DEFAULT_MASK);

	local_irq_restore(flags);

	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */

394 395 396 397 398 399
/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */

400
int temp_tlb_entry __cpuinitdata;
401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	local_irq_restore(flags);
	return ret;
}

439
static int ntlb;
440 441 442 443 444 445 446 447
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

448 449 450 451
/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
L
Linus Torvalds 已提交
452 453 454 455 456 457
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
T
Thiemo Seufer 已提交
458
	 *     be set to fixed-size pages.
L
Linus Torvalds 已提交
459 460 461
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
462 463 464 465
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);
466

467
	if (cpu_has_rixi) {
468 469 470 471 472 473 474 475 476 477 478
		/*
		 * Enable the no read, no exec bits, and enable large virtual
		 * address.
		 */
		u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
		pg |= PG_ELPA;
#endif
		write_c0_pagegrain(pg);
	}

479 480
	temp_tlb_entry = current_cpu_data.tlbsize - 1;

R
Ralf Baechle 已提交
481
	/* From this point on the ARC firmware is dead.	 */
L
Linus Torvalds 已提交
482 483
	local_flush_tlb_all();

T
Thiemo Seufer 已提交
484
	/* Did I tell you that ARC SUCKS?  */
485 486 487 488 489
}

void tlb_init(void)
{
	r4k_tlb_configure();
T
Thiemo Seufer 已提交
490

491 492 493 494 495
	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired-1);
496
			printk("Restricting TLB to %d entries\n", ntlb);
497 498 499 500
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

L
Linus Torvalds 已提交
501 502
	build_tlb_refill_handler();
}
503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525

/*
 * CPU power-management notifier: reconfigure the TLB when a CPU exits
 * a low-power state (or fails to enter one), since the power-down may
 * have clobbered the CP0 TLB configuration.
 */
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

/* Register the PM notifier at arch_initcall time. */
static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);