/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

/* Atomicity and interruptability */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

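/*
 * On SMTC the TLB is shared by all thread contexts on a core, so masking
 * local interrupts is not enough to keep other threads out of it; dvpe()
 * and evpe() disable and re-enable multi-VPE execution around the
 * critical section as well.
 */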
#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

#if defined(CONFIG_CPU_LOONGSON2)
/*
 * LOONGSON2 has a 4-entry ITLB which is a subset of the DTLB;
 * unfortunately, the ITLB is not totally transparent to software.
 */
#define FLUSH_ITLB write_c0_diag(4);

#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); }

#else

#define FLUSH_ITLB
#define FLUSH_ITLB_VM(vma)

#endif

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;
		int huge = is_vm_hugetlb_page(vma);

		ENTER_CRITICAL(flags);
		if (huge) {
			start = round_down(start, HPAGE_SIZE);
			end = round_up(end, HPAGE_SIZE);
			size = (end - start) >> HPAGE_SHIFT;
		} else {
			start = round_down(start, PAGE_SIZE << 1);
			end = round_up(end, PAGE_SIZE << 1);
			size = (end - start) >> (PAGE_SHIFT + 1);
		}
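		/*
		 * Flushing the range one page pair at a time only pays off
		 * while it covers at most half the TLB; beyond that, simply
		 * hand the mm a fresh ASID.
		 */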
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				if (huge)
					start += HPAGE_SIZE;
				else
					start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		EXIT_CRITICAL(flags);
	}
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	ENTER_CRITICAL(flags);
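	/*
	 * Each EntryHi (VPN2) covers an even/odd page pair, so convert the
	 * page count to pairs before comparing against half the TLB size.
	 */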
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		ENTER_CRITICAL(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		FLUSH_ITLB_VM(vma);
		EXIT_CRITICAL(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	ENTER_CRITICAL(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle the debugger faulting in for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	ENTER_CRITICAL(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
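	/* A negative index means the probe missed; a random entry is written below. */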
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
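		/*
		 * EntryLo0/1 each map one half of the huge page; the second
		 * half starts HPAGE_SIZE/2 further on, which is HPAGE_SIZE >> 7
		 * in EntryLo PFN units (physical address >> 6).
		 */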
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	FLUSH_ITLB_VM(vma);
	EXIT_CRITICAL(flags);
}

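/*
 * Install a permanently wired TLB entry, i.e. one that sits below the
 * wired index so that random replacement and local_flush_tlb_all()
 * leave it alone.
 */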
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	EXIT_CRITICAL(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

int __init has_transparent_hugepage(void)
{
	unsigned int mask;
	unsigned long flags;

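	/*
	 * Probe for huge page support: write the huge page mask and check
	 * whether the value sticks in c0_pagemask.
	 */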
	ENTER_CRITICAL(flags);
	write_c0_pagemask(PM_HUGE_MASK);
	back_to_back_c0_hazard();
	mask = read_c0_pagemask();
	write_c0_pagemask(PM_DEFAULT_MASK);

	EXIT_CRITICAL(flags);

	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */

static int __cpuinitdata ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

void __cpuinit tlb_init(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large virtual
		 * address.
		 */
		u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
		pg |= PG_ELPA;
#endif
		write_c0_pagegrain(pg);
	}

	/* From this point on the ARC firmware is dead.  */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */

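	/*
	 * "ntlb=" on the kernel command line limits the number of TLB entries
	 * left for random replacement by wiring off the remainder.
	 */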
	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired-1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}