/*
 * TLB support routines.
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
 *		Modified RID allocation for SMP
 *	    Goutham Rao <goutham.rao@intel.com>
 *		IPI based ptc implementation and A-step IPI implementation.
 * Rohit Seth <rohit.seth@intel.com>
 * Ken Chen <kenneth.w.chen@intel.com>
 * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation
 * Copyright (C) 2007 Intel Corp
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Add multiple ptc.g/ptc.ga instruction support in global tlb purge.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/slab.h>

#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/tlb.h>

static struct {
	u64 mask;		/* mask of supported purge page-sizes */
	unsigned long max_bits;	/* log2 of largest supported purge page-size */
} purge;

struct ia64_ctx ia64_ctx = {
	.lock =	__SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
	.next =	1,
	.max_ctx = ~0U
};

DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
DEFINE_PER_CPU(u8, ia64_tr_num);  /* Number of TR slots in current processor */
DEFINE_PER_CPU(u8, ia64_tr_used); /* Max slot number used by kernel */

struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/*
 * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
 * Called after cpu_init() has set up ia64_ctx.max_ctx based on
 * the maximum RID supported by the boot CPU.
 */
void __init
mmu_context_init (void)
{
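	/* One bit per context; (max_ctx+1)>>3 converts bits to bytes. */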
	ia64_ctx.bitmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
	ia64_ctx.flushmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
}

/*
 * Acquire the ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
	int i, cpu;
	unsigned long flush_bit;

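	/*
	 * Fold the flushmap back into the allocation bitmap: every RID
	 * freed since the last wrap becomes available for reuse.
	 */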
	for (i=0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
		flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
		ia64_ctx.bitmap[i] ^= flush_bit;
	}
 
	/* use offset at 300 to skip daemons */
	ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, 300);
	ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, ia64_ctx.next);

	/*
	 * can't call flush_tlb_all() here because of a race condition
	 * with the O(1) scheduler [EF]
	 */
	cpu = get_cpu(); /* prevent preemption/migration */
	for_each_online_cpu(i)
		if (i != cpu)
			per_cpu(ia64_need_tlb_flush, i) = 1;
	put_cpu();
	local_flush_tlb_all();
}

/*
 * Implement "spinaphores" ... like counting semaphores, but they
 * spin instead of sleeping.  If there are ever any other users for
 * this primitive it can be moved up to a spinaphore.h header.
 */
struct spinaphore {
	unsigned long	ticket;
	unsigned long	serve;
};

static inline void spinaphore_init(struct spinaphore *ss, int val)
{
	ss->ticket = 0;
	ss->serve = val;
}

static inline void down_spin(struct spinaphore *ss)
{
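	/*
	 * Take the next ticket atomically; holders of tickets below
	 * "serve" own one of the purge slots.  time_before() keeps
	 * the unsigned comparison safe across counter wraparound.
	 */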
	unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;

	if (time_before(t, ss->serve))
		return;

	ia64_invala();

	for (;;) {
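		/*
		 * ld8.c.nc re-checks the ALAT entry for ss->serve: once
		 * installed, the load does not go out to memory again
		 * until another CPU's store to ss->serve invalidates the
		 * entry, so spinning here adds no extra bus traffic.
		 */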
		asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
		if (time_before(t, serve))
			return;
		cpu_relax();
	}
}

static inline void up_spin(struct spinaphore *ss)
{
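	/*
	 * Release semantics: our critical-section stores are visible
	 * before the next ticket holder is served.
	 */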
	ia64_fetchadd(1, &ss->serve, rel);
}

static struct spinaphore ptcg_sem;
static u16 nptcg = 1;
static int need_ptcg_sem = 1;
static int toolatetochangeptcgsem = 0;

/*
 * Kernel parameter "nptcg=" overrides the max number of concurrent global TLB
 * purges, which is reported from either PAL or the SAL PALO table.
 *
 * There is no sanity checking of the nptcg value; it is the user's
 * responsibility to pass a value that is valid for the platform.
 * Otherwise, the kernel may hang in some cases.
 */
static int __init
set_nptcg(char *str)
{
	int value = 0;

	get_option(&str, &value);
	setup_ptcg_sem(value, NPTCG_FROM_KERNEL_PARAMETER);

	return 1;
}

__setup("nptcg=", set_nptcg);

/*
 * The maximum number of simultaneous ptc.g purges in the system can
 * be defined by PAL_VM_SUMMARY (in which case we should take
 * the smallest value for any cpu in the system) or by the PAL
 * override table (in which case we should ignore the value from
 * PAL_VM_SUMMARY).
 *
 * Kernel parameter "nptcg=" overrides the maximum number of simultaneous
 * ptc.g purges defined in either PAL_VM_SUMMARY or the PAL override table.
 * In this case, we should ignore the value from either PAL_VM_SUMMARY or
 * the PAL override table.
 *
 * Complicating the logic here is the fact that num_possible_cpus()
 * isn't fully set up until we start bringing cpus online.
 */
void
setup_ptcg_sem(int max_purges, int nptcg_from)
{
	static int kp_override;
	static int palo_override;
	static int firstcpu = 1;

	if (toolatetochangeptcgsem) {
		if (nptcg_from == NPTCG_FROM_PAL && max_purges == 0)
			BUG_ON(1 < nptcg);
		else
			BUG_ON(max_purges < nptcg);
		return;
	}

	if (nptcg_from == NPTCG_FROM_KERNEL_PARAMETER) {
		kp_override = 1;
		nptcg = max_purges;
		goto resetsema;
	}
	if (kp_override) {
		need_ptcg_sem = num_possible_cpus() > nptcg;
		return;
	}

	if (nptcg_from == NPTCG_FROM_PALO) {
		palo_override = 1;

		/* In PALO max_purges == 0 really does mean zero! */
		if (max_purges == 0)
			panic("Whoa! Platform does not support global TLB purges.\n");
		nptcg = max_purges;
		if (nptcg == PALO_MAX_TLB_PURGES) {
			need_ptcg_sem = 0;
			return;
		}
		goto resetsema;
	}
	if (palo_override) {
		if (nptcg != PALO_MAX_TLB_PURGES)
			need_ptcg_sem = (num_possible_cpus() > nptcg);
		return;
	}

	/* In PAL_VM_SUMMARY max_purges == 0 actually means 1 */
	if (max_purges == 0) max_purges = 1;

	if (firstcpu) {
		nptcg = max_purges;
		firstcpu = 0;
	}
	if (max_purges < nptcg)
		nptcg = max_purges;
	if (nptcg == PAL_MAX_PURGES) {
		need_ptcg_sem = 0;
		return;
	} else
		need_ptcg_sem = (num_possible_cpus() > nptcg);

resetsema:
	spinaphore_init(&ptcg_sem, max_purges);
}

void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
		       unsigned long end, unsigned long nbits)
{
	struct mm_struct *active_mm = current->active_mm;

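	/* From the first global purge onwards, nptcg must not change. */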
	toolatetochangeptcgsem = 1;

	if (mm != active_mm) {
		/* Restore region IDs for mm */
		if (mm && active_mm) {
			activate_context(mm);
		} else {
			flush_tlb_all();
			return;
		}
	}

	if (need_ptcg_sem)
		down_spin(&ptcg_sem);

	do {
		/*
		 * Flush ALAT entries also.
		 */
		ia64_ptcga(start, (nbits << 2));
		ia64_srlz_i();
		start += (1UL << nbits);
	} while (start < end);

	if (need_ptcg_sem)
		up_spin(&ptcg_sem);

	if (mm != active_mm) {
		activate_context(active_mm);
	}
}

void
local_flush_tlb_all (void)
{
	unsigned long i, j, flags, count0, count1, stride0, stride1, addr;
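	/*
	 * PAL_PTCE_INFO (cached in cpuinfo at boot) describes a grid of
	 * addresses; issuing ptc.e at every point of the grid purges
	 * the entire local TLB.
	 */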

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}

void
flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
		 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size = end - start;
	unsigned long nbits;

#ifndef CONFIG_SMP
	if (mm != current->active_mm) {
		mm->context = 0;
		return;
	}
#endif

	nbits = ia64_fls(size + 0xfff);
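	/*
	 * nbits is log2 of the range size (at least one 4KB page);
	 * round it up to the next purge page size the CPU supports.
	 */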
	while (unlikely (((1UL << nbits) & purge.mask) == 0) &&
			(nbits < purge.max_bits))
		++nbits;
	if (nbits > purge.max_bits)
		nbits = purge.max_bits;
	start &= ~((1UL << nbits) - 1);

	preempt_disable();
#ifdef CONFIG_SMP
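	/* Other CPUs may hold entries for this mm: purge globally. */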
	if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
		platform_global_tlb_purge(mm, start, end, nbits);
		preempt_enable();
		return;
	}
#endif
	do {
		ia64_ptcl(start, (nbits<<2));
		start += (1UL << nbits);
	} while (start < end);
	preempt_enable();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
EXPORT_SYMBOL(flush_tlb_range);

void ia64_tlb_init(void)
{
	ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
	u64 tr_pgbits;
	long status;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	int cpu = smp_processor_id();

	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
		       "defaulting to architected purge page-sizes.\n", status);
		purge.mask = 0x115557000UL;
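		/* i.e. 4K, 8K, 16K, 64K, 256K, 1M, 4M, 16M, 64M, 256M and 4G */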
	}
	purge.max_bits = ia64_fls(purge.mask);

	ia64_get_ptce(&ptce_info);
	local_cpu_data->ptce_base = ptce_info.base;
	local_cpu_data->ptce_count[0] = ptce_info.count[0];
	local_cpu_data->ptce_count[1] = ptce_info.count[1];
	local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

	local_flush_tlb_all();	/* nuke leftovers from bootstrapping... */
	status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);

	if (status) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		per_cpu(ia64_tr_num, cpu) = 8;
		return;
	}
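	/* Usable TR slots: the smaller of the itr and dtr counts from PAL. */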
	per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
	if (per_cpu(ia64_tr_num, cpu) >
				(vm_info_1.pal_vm_info_1_s.max_dtr_entry+1))
		per_cpu(ia64_tr_num, cpu) =
				vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;
	if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
		static int justonce = 1;
		per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
		if (justonce) {
			justonce = 0;
			printk(KERN_DEBUG "TR register number exceeds "
			       "IA64_TR_ALLOC_MAX!\n");
		}
	}
}

/*
 * is_tr_overlap
 *
 * Check overlap with inserted TRs.
 */
static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
{
	u64 tr_log_size;
	u64 tr_end;
	u64 va_rr = ia64_get_rr(va);
	u64 va_rid = RR_TO_RID(va_rr);
	u64 va_end = va + (1UL << log_size) - 1;

	if (va_rid != RR_TO_RID(p->rr))
		return 0;
	tr_log_size = (p->itir & 0xff) >> 2;
	tr_end = p->ifa + (1UL << tr_log_size) - 1;

	if (va > tr_end || p->ifa > va_end)
		return 0;
	return 1;
}

/*
 * ia64_insert_tr in virtual mode. Allocate a TR slot
 *
 * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr
 *
 * va	: virtual address.
 * pte	: pte entry to be inserted.
 * log_size: log2 of the range to be covered.
 *
 * Return value:  < 0 : error number
 *		  >= 0 : slot number allocated for TR.
 * Must be called with preemption disabled.
 */
int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
{
	int i, r;
	unsigned long psr;
	struct ia64_tr_entry *p;
	int cpu = smp_processor_id();

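	/*
	 * First use on this CPU: allocate a shadow array recording
	 * inserted TRs, itr entries first, then dtr entries,
	 * IA64_TR_ALLOC_MAX of each.
	 */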
	if (!ia64_idtrs[cpu]) {
		ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX *
				sizeof (struct ia64_tr_entry), GFP_KERNEL);
		if (!ia64_idtrs[cpu])
			return -ENOMEM;
	}
	r = -EINVAL;
	/* Check overlap with existing TR entries */
	if (target_mask & 0x1) {
		p = ia64_idtrs[cpu];
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
								i++, p++) {
			if ((p->pte & 0x1) &&
			    is_tr_overlap(p, va, log_size)) {
				printk(KERN_DEBUG "Overlapped Entry "
					"Inserted for TR Register!!\n");
				goto out;
			}
		}
	}
	if (target_mask & 0x2) {
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
								i++, p++) {
			if ((p->pte & 0x1) &&
			    is_tr_overlap(p, va, log_size)) {
				printk(KERN_DEBUG "Overlapped Entry "
					"Inserted for TR Register!!\n");
				goto out;
			}
		}
	}

	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
		switch (target_mask & 0x3) {
		case 1:
			if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
				goto found;
			continue;
		case 2:
			if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
				goto found;
			continue;
		case 3:
			if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
			    !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
				goto found;
			continue;
		default:
			r = -EINVAL;
			goto out;
		}
	}
found:
	if (i >= per_cpu(ia64_tr_num, cpu))
		return -EBUSY;

	/* Record TR info for MCA handler use! */
	if (i > per_cpu(ia64_tr_used, cpu))
		per_cpu(ia64_tr_used, cpu) = i;

	psr = ia64_clear_ic();
	if (target_mask & 0x1) {
		ia64_itr(0x1, i, va, pte, log_size);
		ia64_srlz_i();
		p = ia64_idtrs[cpu] + i;
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	if (target_mask & 0x2) {
		ia64_itr(0x2, i, va, pte, log_size);
		ia64_srlz_i();
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	ia64_set_psr(psr);
	r = i;
out:
	return r;
}
EXPORT_SYMBOL_GPL(ia64_itr_entry);

/*
 * ia64_purge_tr
 *
 * target_mask: 0x1: purge itr, 0x2: purge dtr, 0x3: purge idtr.
 * slot: slot number to be freed.
 *
 * Must be called with preemption disabled.
 */
void ia64_ptr_entry(u64 target_mask, int slot)
{
	int cpu = smp_processor_id();
	int i;
	struct ia64_tr_entry *p;

	if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
		return;

	if (target_mask & 0x1) {
		p = ia64_idtrs[cpu] + slot;
		if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
			p->pte = 0;
			ia64_ptr(0x1, p->ifa, p->itir>>2);
			ia64_srlz_i();
		}
	}

	if (target_mask & 0x2) {
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
			p->pte = 0;
			ia64_ptr(0x2, p->ifa, p->itir>>2);
			ia64_srlz_i();
		}
	}

	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
		if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
		    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
			break;
	}
	per_cpu(ia64_tr_used, cpu) = i;
}
EXPORT_SYMBOL_GPL(ia64_ptr_entry);