/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

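/* Index into a TSB: drop the low hash_shift bits of the virtual
 * address and mask with the number of entries (always a power of two).
 */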
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}

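/* A TSB entry's tag holds the virtual address shifted down by 22 bits. */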
static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
					      KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v))
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}

static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
			    unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < tb->tlb_nr; i++) {
		unsigned long v = tb->vaddrs[i];
		unsigned long tag, ent, hash;

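		/* Bit zero of a batched address is used as a flag by the
		 * TLB batching code; mask it off to recover the virtual
		 * address before hashing and tag computation.
		 */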
		v &= ~0x1UL;

		hash = tsb_hash(v, hash_shift, nentries);
		ent = tsb + (hash * sizeof(struct tsb));
		tag = (v >> 22UL);

		tsb_flush(ent, tag);
	}
}

void flush_tsb_user(struct tlb_batch *tb)
{
	struct mm_struct *mm = tb->mm;
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K

#ifdef CONFIG_HUGETLB_PAGE
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#endif

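/* Compute the TSB register value for one TSB block.  cheetah+ and
 * sun4v take a physical TSB base directly; older chips instead get a
 * locked kernel mapping (tsb_map_vaddr/tsb_map_pte).  On sun4v the
 * hypervisor TSB descriptor is filled in as well.
 */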
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
		       current->comm, current->pid, tsb_bytes);
		do_exit(SIGSEGV);
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB.  */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor.  */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

struct kmem_cache *pgtable_cache __read_mostly;

static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

void __init pgtable_cache_init(void)
{
	unsigned long i;

	pgtable_cache = kmem_cache_create("pgtable_cache",
					  PAGE_SIZE, PAGE_SIZE,
					  0,
					  _clear_page);
	if (!pgtable_cache) {
		prom_printf("pgtable_cache_init(): Could not create!\n");
		prom_halt();
	}

	for (i = 0; i < 8; i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  0, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}

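/* sysctl_tsb_ratio tunes how early a TSB is grown relative to its
 * capacity: a negative value places tsb_rss_limit below the number of
 * entries (the default of -2 gives 3/4 of capacity), while a
 * non-negative value lets the RSS exceed the entry count before a
 * grow is attempted.
 */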
int sysctl_tsb_ratio = -2;

static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
	unsigned long num_ents = (new_size / sizeof(struct tsb));

	if (sysctl_tsb_ratio < 0)
		return num_ents - (num_ents >> -sysctl_tsb_ratio);
	else
		return num_ents + (num_ents >> sysctl_tsb_ratio);
}

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try to grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

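	/* Pick the smallest TSB size whose RSS limit still exceeds the
	 * current RSS; new_cache_index selects the matching kmem cache.
	 */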
	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		new_rss_limit = tsb_size_to_rss_limit(new_size);
		if (new_rss_limit > rss)
			break;
		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
					gfp_flags, numa_node_id());
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid.  */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
	}

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu.  */
		tsb_context_switch(mm);

		/* Now force other processors to do the same.  */
		preempt_disable();
		smp_tsb_sync(mm);
		preempt_enable();

		/* Now it is safe to free the old tsb.  */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long huge_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

#ifdef CONFIG_HUGETLB_PAGE
	/* We reset it to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	huge_pte_count = mm->context.huge_pte_count;
	mm->context.huge_pte_count = 0;
#endif

	mm->context.pgtable_page = NULL;

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyways.
	 */
	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));

#ifdef CONFIG_HUGETLB_PAGE
	if (unlikely(huge_pte_count))
		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
#endif

	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}

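/* Release one TSB back to its kmem cache; the cache index is recorded
 * in the low three bits of tsb_reg_val.
 */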
static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long cache_index;

	if (!tp->tsb)
		return;
	cache_index = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}

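/* Tear down the MMU state of an exiting address space: free its TSBs,
 * drop the cached page table page, and return its context number to
 * the allocator bitmap.
 */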
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, i;
	struct page *page;

	for (i = 0; i < MM_NUM_TSBS; i++)
		tsb_destroy_one(&mm->context.tsb_block[i]);

	page = mm->context.pgtable_page;
	if (page && put_page_testzero(page)) {
		pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}