/*
 * TLB support routines.
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
 *		Modified RID allocation for SMP
 *          Goutham Rao <goutham.rao@intel.com>
 *              IPI based ptc implementation and A-step IPI implementation.
 * Rohit Seth <rohit.seth@intel.com>
 * Ken Chen <kenneth.w.chen@intel.com>
 * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation
 * Copyright (C) 2007 Intel Corp
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Add multiple ptc.g/ptc.ga instruction support in global tlb purge.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bootmem.h>

#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/tlb.h>

static struct {
	unsigned long mask;	/* mask of supported purge page-sizes */
	unsigned long max_bits;	/* log2 of largest supported purge page-size */
} purge;

struct ia64_ctx ia64_ctx = {
	.lock =	__SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
	.next =	1,
	.max_ctx = ~0U
};

DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
DEFINE_PER_CPU(u8, ia64_tr_num);  /* number of TR slots on the current processor */
DEFINE_PER_CPU(u8, ia64_tr_used); /* highest TR slot number used by the kernel */

struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];

/*
 * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
 * Called after cpu_init() has set up ia64_ctx.max_ctx based on the
 * maximum RID supported by the boot CPU.
 */
void __init
mmu_context_init (void)
{
	ia64_ctx.bitmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
	ia64_ctx.flushmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
}
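
/*
 * Illustrative sizing sketch (numbers made up, not from this file):
 * each context number needs one bit, so a bitmap covering max_ctx+1
 * contexts needs (max_ctx+1)>>3 bytes.  Assuming an 18-bit usable
 * context space:
 *
 *	max_ctx = (1UL << 18) - 1 = 262143
 *	(max_ctx + 1) >> 3        = 32768 bytes (32KB)
 *
 * and both bitmap and flushmap get one such bootmem allocation.
 */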

/*
 * Acquire the ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
	int i, cpu;
	unsigned long flush_bit;

	for (i = 0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
		flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
		ia64_ctx.bitmap[i] ^= flush_bit;
	}
 
	/* use offset at 300 to skip daemons */
	ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, 300);
	ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, ia64_ctx.next);

	/*
	 * can't call flush_tlb_all() here because of race condition
	 * with O(1) scheduler [EF]
	 */
	cpu = get_cpu(); /* prevent preemption/migration */
	for_each_online_cpu(i)
		if (i != cpu)
			per_cpu(ia64_need_tlb_flush, i) = 1;
	put_cpu();
	local_flush_tlb_all();
}
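
/*
 * Worked sketch of the flushmap recycling above (illustrative bit
 * patterns, not real data): bitmap marks in-use context numbers,
 * flushmap marks context numbers released since the last wrap, so
 * XOR-ing a snapshot of flushmap into bitmap turns those bits back
 * off.  With 8-bit words:
 *
 *	bitmap[i]   = 11110110      in-use contexts
 *	flushmap[i] = 01100000      released since last wrap
 *	bitmap[i] ^= flushmap[i];   10010110, two values reusable
 *
 * The xchg() atomically snapshots and clears flushmap[i], so a
 * release racing with the wrap is not lost.
 */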

/*
 * Implement "spinaphores" ... like counting semaphores, but they
 * spin instead of sleeping.  If there are ever any other users for
 * this primitive it can be moved up to a spinaphore.h header.
 */
struct spinaphore {
	atomic_t	cur;
};

static inline void spinaphore_init(struct spinaphore *ss, int val)
{
	atomic_set(&ss->cur, val);
}

static inline void down_spin(struct spinaphore *ss)
{
	while (unlikely(!atomic_add_unless(&ss->cur, -1, 0)))
		while (atomic_read(&ss->cur) == 0)
			cpu_relax();
}

static inline void up_spin(struct spinaphore *ss)
{
	atomic_add(1, &ss->cur);
}
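
/*
 * Illustrative usage of the primitive above (hypothetical caller,
 * not part of this file): a spinaphore initialized to N admits at
 * most N cpus to the guarded region at once, busy-waiting instead
 * of sleeping:
 *
 *	static struct spinaphore sem;
 *
 *	spinaphore_init(&sem, N);
 *	...
 *	down_spin(&sem);
 *	... at most N cpus execute here concurrently ...
 *	up_spin(&sem);
 *
 * ptcg_sem below uses exactly this pattern to throttle concurrent
 * ptc.g issuers to the platform limit.
 */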

static struct spinaphore ptcg_sem;
static u16 nptcg = 1;
static int need_ptcg_sem = 1;
static int toolatetochangeptcgsem = 0;

/*
 * Kernel parameter "nptcg=" overrides the maximum number of concurrent
 * global TLB purges, which is otherwise reported by either PAL_VM_SUMMARY
 * or the SAL PALO table.
 *
 * There is no sanity checking of the nptcg value; it is the user's
 * responsibility to supply a value that is valid for the platform.
 * Otherwise, the kernel may hang in some cases.
 */
static int __init
set_nptcg(char *str)
{
	int value = 0;

	get_option(&str, &value);
	setup_ptcg_sem(value, NPTCG_FROM_KERNEL_PARAMETER);

	return 1;
}

__setup("nptcg=", set_nptcg);
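
/*
 * Example (hypothetical boot command line): passing "nptcg=2" caps
 * concurrent ptc.g purges at two, regardless of what PAL or the PALO
 * table report:
 *
 *	root=/dev/sda2 nptcg=2
 *
 * get_option() parses the decimal value; as noted above, no range
 * checking is performed.
 */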

/*
 * Maximum number of simultaneous ptc.g purges in the system can
 * be defined by PAL_VM_SUMMARY (in which case we should take
 * the smallest value for any cpu in the system) or by the PAL
 * override table (in which case we should ignore the value from
 * PAL_VM_SUMMARY).
 *
 * Kernel parameter "nptcg=" overrides the maximum number of simultaneous ptc.g
 * purges defined in either PAL_VM_SUMMARY or PAL override table. In this case,
 * we should ignore the value from either PAL_VM_SUMMARY or PAL override table.
 *
 * Complicating the logic here is the fact that num_possible_cpus()
 * isn't fully set up until we start bringing cpus online.
 */
void
setup_ptcg_sem(int max_purges, int nptcg_from)
{
	static int kp_override;
	static int palo_override;
	static int firstcpu = 1;

	if (toolatetochangeptcgsem) {
		if (nptcg_from == NPTCG_FROM_PAL && max_purges == 0)
			BUG_ON(1 < nptcg);
		else
			BUG_ON(max_purges < nptcg);
		return;
	}

	if (nptcg_from == NPTCG_FROM_KERNEL_PARAMETER) {
		kp_override = 1;
		nptcg = max_purges;
		goto resetsema;
	}
	if (kp_override) {
		need_ptcg_sem = num_possible_cpus() > nptcg;
		return;
	}

	if (nptcg_from == NPTCG_FROM_PALO) {
		palo_override = 1;

		/* In PALO, max_purges == 0 really means no ptc.g support at all! */
		if (max_purges == 0)
			panic("Whoa! Platform does not support global TLB purges.\n");
		nptcg = max_purges;
		if (nptcg == PALO_MAX_TLB_PURGES) {
			need_ptcg_sem = 0;
			return;
		}
		goto resetsema;
	}
	if (palo_override) {
		if (nptcg != PALO_MAX_TLB_PURGES)
			need_ptcg_sem = (num_possible_cpus() > nptcg);
		return;
	}

	/* In PAL_VM_SUMMARY max_purges == 0 actually means 1 */
	if (max_purges == 0)
		max_purges = 1;

	if (firstcpu) {
		nptcg = max_purges;
		firstcpu = 0;
	}
	if (max_purges < nptcg)
		nptcg = max_purges;
	if (nptcg == PAL_MAX_PURGES) {
		need_ptcg_sem = 0;
		return;
	} else
		need_ptcg_sem = (num_possible_cpus() > nptcg);

resetsema:
	spinaphore_init(&ptcg_sem, max_purges);
}
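
/*
 * Precedence sketch for the logic above (illustrative): the sources
 * are honored in the order
 *
 *	"nptcg=" parameter  >  PALO override table  >  PAL_VM_SUMMARY
 *
 * For example, on a hypothetical 4-cpu box where PAL_VM_SUMMARY
 * reports max_purges == 0 (meaning 1), nptcg ends up 1 and
 * need_ptcg_sem is set because num_possible_cpus() > 1, so ptc.g
 * issuers serialize on ptcg_sem.
 */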

void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
		       unsigned long end, unsigned long nbits)
{
	struct mm_struct *active_mm = current->active_mm;

	toolatetochangeptcgsem = 1;

	if (mm != active_mm) {
		/* Restore region IDs for mm */
		if (mm && active_mm) {
			activate_context(mm);
		} else {
			flush_tlb_all();
			return;
		}
	}

	if (need_ptcg_sem)
		down_spin(&ptcg_sem);

	do {
		/*
		 * Flush ALAT entries also.
		 */
		ia64_ptcga(start, (nbits << 2));
		ia64_srlz_i();
		start += (1UL << nbits);
	} while (start < end);

	if (need_ptcg_sem)
		up_spin(&ptcg_sem);

	if (mm != active_mm)
		activate_context(active_mm);
}
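
/*
 * Worked example of the purge loop above (made-up addresses): purging
 * a 64KB range with nbits == 14 (16KB purge pages) issues four
 * ptc.ga operations:
 *
 *	start = 0xa000000000000000, end = start + 0x10000
 *	ia64_ptcga(start, 14 << 2); start += 0x4000;   (repeated 4x)
 *
 * The size operand is nbits << 2 because, as in the itir register,
 * the page-size field occupies bits 7:2.
 */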

void
local_flush_tlb_all (void)
{
	unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
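
/*
 * Illustrative walk of the ptc.e loop above (counts and strides made
 * up; real values come from ia64_get_ptce()): with count = {4, 8},
 * the nested loop issues 4 * 8 = 32 ptc.e operations, advancing addr
 * by stride1 after every purge and by an additional stride0 at the
 * end of each inner row.  Per the architecture, completing the whole
 * sequence is guaranteed to flush the entire local TLB.
 */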

void
flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
		 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size = end - start;
	unsigned long nbits;

#ifndef CONFIG_SMP
	if (mm != current->active_mm) {
		mm->context = 0;
		return;
	}
#endif

	nbits = ia64_fls(size + 0xfff);
	while (unlikely (((1UL << nbits) & purge.mask) == 0) &&
			(nbits < purge.max_bits))
		++nbits;
	if (nbits > purge.max_bits)
		nbits = purge.max_bits;
	start &= ~((1UL << nbits) - 1);

	preempt_disable();
#ifdef CONFIG_SMP
	if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
		platform_global_tlb_purge(mm, start, end, nbits);
		preempt_enable();
		return;
	}
#endif
	do {
		ia64_ptcl(start, (nbits<<2));
		start += (1UL << nbits);
	} while (start < end);
	preempt_enable();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
EXPORT_SYMBOL(flush_tlb_range);
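
/*
 * Worked example of the nbits computation above (made-up numbers):
 * for a 5KB range, size + 0xfff == 0x23ff and ia64_fls() returns 13,
 * i.e. an 8KB purge page.  If purge.mask lacks the 8KB bit, nbits is
 * bumped to the next supported purge page-size (capped at
 * purge.max_bits), and start is aligned down to that size so the
 * whole range remains covered.
 */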

void __devinit
ia64_tlb_init (void)
{
	ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
	unsigned long tr_pgbits;
	long status;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	int cpu = smp_processor_id();

	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
		       "defaulting to architected purge page-sizes.\n", status);
		purge.mask = 0x115557000UL;
	}
	purge.max_bits = ia64_fls(purge.mask);

	ia64_get_ptce(&ptce_info);
	local_cpu_data->ptce_base = ptce_info.base;
	local_cpu_data->ptce_count[0] = ptce_info.count[0];
	local_cpu_data->ptce_count[1] = ptce_info.count[1];
	local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

	local_flush_tlb_all();	/* nuke leftovers from bootstrapping... */
	status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);

	if (status) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		per_cpu(ia64_tr_num, cpu) = 8;
		return;
	}
	per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
	if (per_cpu(ia64_tr_num, cpu) >
				(vm_info_1.pal_vm_info_1_s.max_dtr_entry+1))
		per_cpu(ia64_tr_num, cpu) =
				vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;
	if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
		static int justonce = 1;
		per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
		if (justonce) {
			justonce = 0;
			printk(KERN_DEBUG "TR register number exceeds "
			       "IA64_TR_ALLOC_MAX!\n");
		}
	}
}

/*
 * is_tr_overlap
 *
 * Check overlap with inserted TRs.
 */
static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
{
	u64 tr_log_size;
	u64 tr_end;
	u64 va_rr = ia64_get_rr(va);
	u64 va_rid = RR_TO_RID(va_rr);
	u64 va_end = va + (1<<log_size) - 1;

	if (va_rid != RR_TO_RID(p->rr))
		return 0;
	tr_log_size = (p->itir & 0xff) >> 2;
	tr_end = p->ifa + (1<<tr_log_size) - 1;

	if (va > tr_end || p->ifa > va_end)
		return 0;
	return 1;
}
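
/*
 * Illustrative check (made-up numbers): a TR covering 16KB at
 * p->ifa = 0xa000000000000000 (tr_end = p->ifa + 0x3fff) overlaps a
 * requested 4KB insert at va = 0xa000000000002000 (va_end = va +
 * 0xfff): the RIDs match, va <= tr_end, and p->ifa <= va_end, so
 * is_tr_overlap() returns 1.
 */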

/*
 * ia64_insert_tr in virtual mode. Allocate a TR slot
 *
 * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr
 *
 * va	: virtual address.
 * pte	: pte entry to be inserted.
 * log_size: log2 of the address range to be covered.
 *
 * Return value:  < 0 : error number
 *		 >= 0 : slot number allocated for the TR
 * Must be called with preemption disabled.
 */
int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
{
	int i, r;
	unsigned long psr;
	struct ia64_tr_entry *p;
	int cpu = smp_processor_id();

	r = -EINVAL;
	/* Check overlap with existing TR entries */
	if (target_mask & 0x1) {
		p = &__per_cpu_idtrs[cpu][0][0];
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
								i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapped Entry "
						"Inserted for TR Register!!\n");
					goto out;
				}
		}
	}
	if (target_mask & 0x2) {
		p = &__per_cpu_idtrs[cpu][1][0];
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
								i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapped Entry "
						"Inserted for TR Register!!\n");
					goto out;
				}
		}
	}

	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
		switch (target_mask & 0x3) {
		case 1:
			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
				goto found;
			continue;
		case 2:
			if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
				goto found;
			continue;
		case 3:
			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
				!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
				goto found;
			continue;
		default:
			r = -EINVAL;
			goto out;
		}
	}
found:
	if (i >= per_cpu(ia64_tr_num, cpu))
		return -EBUSY;

	/* Record TR info for MCA handler use! */
	if (i > per_cpu(ia64_tr_used, cpu))
		per_cpu(ia64_tr_used, cpu) = i;

	psr = ia64_clear_ic();
	if (target_mask & 0x1) {
		ia64_itr(0x1, i, va, pte, log_size);
		ia64_srlz_i();
		p = &__per_cpu_idtrs[cpu][0][i];
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	if (target_mask & 0x2) {
		ia64_itr(0x2, i, va, pte, log_size);
		ia64_srlz_i();
		p = &__per_cpu_idtrs[cpu][1][i];
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	ia64_set_psr(psr);
	r = i;
out:
	return r;
}
EXPORT_SYMBOL_GPL(ia64_itr_entry);
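
/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): pin a 16MB kernel mapping into a DTR slot and release it
 * later; va and pte are prepared by the caller, and preemption must
 * be disabled around both calls:
 *
 *	int slot;
 *
 *	preempt_disable();
 *	slot = ia64_itr_entry(0x2, va, pte, 24);   (log2(16MB) == 24)
 *	preempt_enable();
 *	if (slot < 0)
 *		return slot;
 *	...
 *	preempt_disable();
 *	ia64_ptr_entry(0x2, slot);
 *	preempt_enable();
 */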

/*
 * ia64_purge_tr
 *
 * target_mask: 0x1: purge itr, 0x2: purge dtr, 0x3: purge idtr.
 * slot: slot number to be freed.
 *
 * Must be called with preemption disabled.
 */
void ia64_ptr_entry(u64 target_mask, int slot)
{
	int cpu = smp_processor_id();
	int i;
	struct ia64_tr_entry *p;

	if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
		return;

	if (target_mask & 0x1) {
		p = &__per_cpu_idtrs[cpu][0][slot];
		if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
			p->pte = 0;
			ia64_ptr(0x1, p->ifa, p->itir>>2);
			ia64_srlz_i();
		}
	}

	if (target_mask & 0x2) {
		p = &__per_cpu_idtrs[cpu][1][slot];
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
			p->pte = 0;
			ia64_ptr(0x2, p->ifa, p->itir>>2);
			ia64_srlz_i();
		}
	}

	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
		if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
				(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
			break;
	}
	per_cpu(ia64_tr_used, cpu) = i;
}
EXPORT_SYMBOL_GPL(ia64_ptr_entry);