/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

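/* The TSBs (the kernel one above and the per-mm ones below) are
 * power-of-two sized, direct-mapped tables of struct tsb entries.
 * tsb_hash() folds a virtual address into a table index, and
 * tag_compare() matches an entry against the tag format used
 * throughout this file, namely vaddr >> 22.
 */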
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
					      KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v))
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}

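/* Invalidate, in one TSB, the entry for each address in the TLB flush
 * batch.  The low bit of each batched address is a flag and is masked
 * off before hashing; the TSB base passed in is virtual or physical
 * depending on cpu type, as set up by the caller.
 */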
static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
			    unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < tb->tlb_nr; i++) {
		unsigned long v = tb->vaddrs[i];
		unsigned long tag, ent, hash;

		v &= ~0x1UL;

		hash = tsb_hash(v, hash_shift, nentries);
		ent = tsb + (hash * sizeof(struct tsb));
		tag = (v >> 22UL);

		tsb_flush(ent, tag);
	}
}

void flush_tsb_user(struct tlb_batch *tb)
{
	struct mm_struct *mm = tb->mm;
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

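/* Translate the compile-time base and huge page size selections into
 * the hypervisor page size index/mask values used when filling in the
 * TSB descriptors below.
 */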
#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
#else
#error Broken base page size setting...
#endif

#ifdef CONFIG_HUGETLB_PAGE
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#else
#error Broken huge page size setting...
#endif
#endif

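/* Compute the TSB register value for the TSB just installed in
 * tsb_block[tsb_idx].  On older cpus the TSB is mapped at TSBMAP_BASE
 * with a locked TLB entry; on cheetah_plus and sun4v it is referenced
 * by physical address.  Under the hypervisor we also fill in the TSB
 * descriptor handed to the firmware.
 */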
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
		       current->comm, current->pid, tsb_bytes);
		do_exit(SIGSEGV);
	};
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB.  */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor.  */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		};
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		};
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

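/* One kmem cache per possible TSB size (8KB through 1MB).  Each cache
 * aligns its objects to their size, which both satisfies the alignment
 * BUG_ON() in setup_tsb_params() and lets a TSB be covered by a single
 * TLB entry.
 */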
void __init pgtable_cache_init(void)
{
	unsigned long i;

	for (i = 0; i < 8; i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  0, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}

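/* Grow threshold as a fraction of TSB capacity, tunable via sysctl.
 * A negative ratio sets the RSS limit below the number of entries
 * (the default of -2 gives 3/4 of capacity), a positive ratio lets
 * RSS exceed the number of entries before we grow.
 */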
int sysctl_tsb_ratio = -2;

static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
	unsigned long num_ents = (new_size / sizeof(struct tsb));

	if (sysctl_tsb_ratio < 0)
		return num_ents - (num_ents >> -sysctl_tsb_ratio);
	else
		return num_ents + (num_ents >> sysctl_tsb_ratio);
}

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try and grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		new_rss_limit = tsb_size_to_rss_limit(new_size);
		if (new_rss_limit > rss)
			break;
		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
					gfp_flags, numa_node_id());
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid.  */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
	}

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu.  */
		tsb_context_switch(mm);

		/* Now force other processors to do the same.  */
		preempt_disable();
		smp_tsb_sync(mm);
		preempt_enable();

		/* Now it is safe to free the old tsb.  */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}

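/* Called when a new address space is created.  The context value is
 * cleared here (a real context number is allocated elsewhere on first
 * use) and an initial TSB is sized from the mm's current RSS, so a
 * fork inherits its parent's TSB size.
 */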
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long huge_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

#ifdef CONFIG_HUGETLB_PAGE
	/* We reset it to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	huge_pte_count = mm->context.huge_pte_count;
	mm->context.huge_pte_count = 0;
#endif

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyways.
	 */
	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));

#ifdef CONFIG_HUGETLB_PAGE
	if (unlikely(huge_pte_count))
		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
#endif

	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}

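/* Free one TSB, recovering which kmem cache it came from out of the
 * size field in the low bits of tsb_reg_val.
 */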
static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long cache_index;

	if (!tp->tsb)
		return;
	cache_index = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}

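/* Tear down all of an exiting address space's TSBs and give its MMU
 * context number back to the allocation bitmap.
 */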
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, i;

	for (i = 0; i < MM_NUM_TSBS; i++)
		tsb_destroy_one(&mm->context.tsb_block[i]);

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}