/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

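/* The TSB is indexed by hashing the virtual address: shift out the page
 * offset and mask with the number of entries (always a power of two).
 * An entry's tag holds bits 63:22 of the virtual address, which is what
 * tag_compare() checks.
 */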
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
					      KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v)) {
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
			membar_storeload_storestore();
		}
	}
}

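/* Flush a batch of user virtual addresses out of one TSB: hash each
 * address to its entry and let tsb_flush() invalidate it if the tag
 * matches.
 */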
static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		unsigned long tag, ent, hash;

		v &= ~0x1UL;

		hash = tsb_hash(v, hash_shift, nentries);
		ent = tsb + (hash * sizeof(struct tsb));
		tag = (v >> 22UL);

		tsb_flush(ent, tag);
	}
}

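/* Flush the mmu_gather batch from this address space's base TSB and,
 * when huge pages are in use, from its huge-page TSB as well.  The TSB
 * base is a physical address on cheetah+ and hypervisor chips.
 */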
void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

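/* Pick the hypervisor page-size index and mask that correspond to the
 * kernel's configured base and huge page sizes; they are used in the
 * TSB descriptors set up below.
 */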
#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_512K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_4MB
#else
#error Broken base page size setting...
#endif

#ifdef CONFIG_HUGETLB_PAGE
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#else
#error Broken huge page size setting...
#endif
#endif

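/* Encode the TSB register value for this TSB.  Chips that cannot take a
 * physical TSB address also get a virtual mapping address and a locked
 * PTE; on hypervisor (sun4v) chips the TSB descriptor is filled in as
 * well.
 */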
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		BUG();
	};
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB.  */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor.  */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		};
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		};
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

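/* One kmem cache per supported TSB size (8KB << 0 through 8KB << 7),
 * aligned to the object size so that every TSB is naturally aligned and
 * physically contiguous.
 */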
static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

void __init pgtable_cache_init(void)
{
	unsigned long i;

	for (i = 0; i < 8; i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  0, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try and grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
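/* A concrete example: each struct tsb entry is 16 bytes (tag + pte), so
 * a 64KB TSB holds 4096 entries and its tsb_rss_limit is set to 3072;
 * the next grow is triggered once the RSS of the address space passes
 * that mark.
 */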
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		unsigned long n_entries = new_size / sizeof(struct tsb);

		n_entries = (n_entries * 3) / 4;
		if (n_entries > rss)
			break;

		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;
	else
		new_rss_limit = ((new_size / sizeof(struct tsb)) * 3) / 4;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc(tsb_caches[new_cache_index], gfp_flags);
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid.  */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
	}

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu.  */
		tsb_context_switch(mm);

		/* Now force other processors to do the same.  */
		smp_tsb_sync(mm);

		/* Now it is safe to free the old tsb.  */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}

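/* Set up the MMU context for a new address space: allocate a base TSB
 * sized from the current RSS, and a huge-page TSB if the parent already
 * had huge pages mapped.
 */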
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long huge_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

#ifdef CONFIG_HUGETLB_PAGE
	/* We reset it to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	huge_pte_count = mm->context.huge_pte_count;
	mm->context.huge_pte_count = 0;
#endif

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyways.
	 */
	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));

#ifdef CONFIG_HUGETLB_PAGE
	if (unlikely(huge_pte_count))
		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
#endif

	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}

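/* Return one TSB to the kmem cache it came from; the cache index is
 * recovered from the low three bits of tsb_reg_val.
 */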
static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long cache_index;

	if (!tp->tsb)
		return;
	cache_index = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}

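/* Tear down an address space: free all of its TSBs and give its MMU
 * context number back to the allocation bitmap.
 */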
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, i;

	for (i = 0; i < MM_NUM_TSBS; i++)
		tsb_destroy_one(&mm->context.tsb_block[i]);

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}