/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/spu.h>

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

#define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
#define MAX_NUMBER_GPAGES	1024

/* Tracks the 16G pages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  */
static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;

/* Array of valid huge page sizes - a non-zero value (hugepte_shift) is
 * stored for the huge page sizes that are valid.
 */
unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */

#define hugepte_shift			mmu_huge_psizes
#define PTRS_PER_HUGEPTE(psize)		(1 << hugepte_shift[psize])
#define HUGEPTE_TABLE_SIZE(psize)	(sizeof(pte_t) << hugepte_shift[psize])

#define HUGEPD_SHIFT(psize)		(mmu_psize_to_shift(psize) \
						+ hugepte_shift[psize])
#define HUGEPD_SIZE(psize)		(1UL << HUGEPD_SHIFT(psize))
#define HUGEPD_MASK(psize)		(~(HUGEPD_SIZE(psize)-1))
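/*
 * Illustrative note (added, not in the original source): with a 4K base
 * page size, set_huge_psize() below places 16M pages at the PUD level,
 * so hugepte_shift[MMU_PAGE_16M] becomes PUD_SHIFT - 24 and
 * HUGEPD_SHIFT(MMU_PAGE_16M) == 24 + (PUD_SHIFT - 24) == PUD_SHIFT.
 * A single hugepte table thus covers the range of one PUD entry, and
 * HUGEPD_MASK() rounds an address down to the start of that range.
 */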

/* Subtract one from array size because we don't need a cache for 4K since
 * it is not a huge page size */
#define HUGE_PGTABLE_INDEX(psize)	(HUGEPTE_CACHE_NUM + psize - 1)
#define HUGEPTE_CACHE_NAME(psize)	(huge_pgtable_cache_name[psize])

static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = {
	"unused_4K", "hugepte_cache_64K", "unused_64K_AP",
	"hugepte_cache_1M", "hugepte_cache_16M", "hugepte_cache_16G"
};

/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */
#define HUGEPD_OK	0x1

typedef struct { unsigned long pd; } hugepd_t;

#define hugepd_none(hpd)	((hpd).pd == 0)

static inline int shift_to_mmu_psize(unsigned int shift)
{
	switch (shift) {
#ifndef CONFIG_PPC_64K_PAGES
	case PAGE_SHIFT_64K:
	    return MMU_PAGE_64K;
#endif
	case PAGE_SHIFT_16M:
	    return MMU_PAGE_16M;
	case PAGE_SHIFT_16G:
	    return MMU_PAGE_16G;
	}
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}
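/*
 * Note (added for clarity): if the requested psize has no shift registered
 * in mmu_psize_defs[], mmu_psize_to_shift() hits the BUG(); callers are
 * expected to pass only page sizes the MMU actually supports.
 */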

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!(hpd.pd & HUGEPD_OK));
	return (pte_t *)(hpd.pd & ~HUGEPD_OK);
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    struct hstate *hstate)
{
	unsigned int shift = huge_page_shift(hstate);
	int psize = shift_to_mmu_psize(shift);
	unsigned long idx = ((addr >> shift) & (PTRS_PER_HUGEPTE(psize)-1));
	pte_t *dir = hugepd_page(*hpdp);

	return dir + idx;
}

static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned int psize)
{
	pte_t *new = kmem_cache_zalloc(pgtable_cache[HUGE_PGTABLE_INDEX(psize)],
				      GFP_KERNEL|__GFP_REPEAT);

	if (! new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!hugepd_none(*hpdp))
		kmem_cache_free(pgtable_cache[HUGE_PGTABLE_INDEX(psize)], new);
	else
		hpdp->pd = (unsigned long)new | HUGEPD_OK;
	spin_unlock(&mm->page_table_lock);
	return 0;
}

static pud_t *hpud_offset(pgd_t *pgd, unsigned long addr, struct hstate *hstate)
{
	if (huge_page_shift(hstate) < PUD_SHIFT)
		return pud_offset(pgd, addr);
	else
		return (pud_t *) pgd;
}
static pud_t *hpud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr,
			 struct hstate *hstate)
{
	if (huge_page_shift(hstate) < PUD_SHIFT)
		return pud_alloc(mm, pgd, addr);
	else
		return (pud_t *) pgd;
}
static pmd_t *hpmd_offset(pud_t *pud, unsigned long addr, struct hstate *hstate)
{
	if (huge_page_shift(hstate) < PMD_SHIFT)
		return pmd_offset(pud, addr);
	else
		return (pmd_t *) pud;
}
static pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr,
			 struct hstate *hstate)
{
	if (huge_page_shift(hstate) < PMD_SHIFT)
		return pmd_alloc(mm, pud, addr);
	else
		return (pmd_t *) pud;
}
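/*
 * Note (added for clarity): when the huge page size covers at least a
 * full PMD (or PUD) worth of address space, the hugepd pointer lives
 * directly in the higher-level entry, so the helpers above simply cast
 * the pgd/pud through instead of walking down an extra level.
 */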

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(unsigned long addr, unsigned long page_size,
	unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}


/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;

	unsigned int psize;
	unsigned int shift;
	unsigned long sz;
	struct hstate *hstate;
	psize = get_slice_psize(mm, addr);
	shift = mmu_psize_to_shift(psize);
	sz = ((1UL) << shift);
	hstate = size_to_hstate(sz);

	addr &= hstate->mask;

	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = hpud_offset(pg, addr, hstate);
		if (!pud_none(*pu)) {
			pm = hpmd_offset(pu, addr, hstate);
			if (!pmd_none(*pm))
				return hugepte_offset((hugepd_t *)pm, addr,
						      hstate);
		}
	}

	return NULL;
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	struct hstate *hstate;
	unsigned int psize;
	hstate = size_to_hstate(sz);

	psize = get_slice_psize(mm, addr);
	BUG_ON(!mmu_huge_psizes[psize]);

	addr &= hstate->mask;

	pg = pgd_offset(mm, addr);
	pu = hpud_alloc(mm, pg, addr, hstate);

	if (pu) {
		pm = hpmd_alloc(mm, pu, addr, hstate);
		if (pm)
			hpdp = (hugepd_t *)pm;
	}

	if (! hpdp)
		return NULL;

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, psize))
		return NULL;

	return hugepte_offset(hpdp, addr, hstate);
}
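/*
 * Usage note (added, not in the original source): the generic hugetlb
 * fault path is expected to call huge_pte_alloc() to find or create the
 * hugepte slot for a faulting address, and then install the translation
 * with set_huge_pte_at() further down in this file.
 */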

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp,
			       unsigned int psize)
{
	pte_t *hugepte = hugepd_page(*hpdp);

	hpdp->pd = 0;
	tlb->need_flush = 1;
	pgtable_free_tlb(tlb, pgtable_free_cache(hugepte,
						 HUGEPTE_CACHE_NUM+psize-1,
						 PGF_CACHENUM_MASK));
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling,
				   unsigned int psize)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pmd, psize);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;
	unsigned int shift;
	unsigned int psize = get_slice_psize(tlb->mm, addr);
	shift = mmu_psize_to_shift(psize);

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (shift < PMD_SHIFT) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling, psize);
		} else {
			if (pud_none(*pud))
				continue;
			free_hugepte_range(tlb, (hugepd_t *)pud, psize);
		}
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * Comments below are taken from the normal free_pgd_range().  They
	 * apply here too.  The tests against HUGEPD_MASK below are
	 * essential, because we *don't* test for this at the bottom
	 * level.  Without them we'll attempt to free a hugepte table
	 * when we unmap just part of it, even if there are other
	 * active mappings using it.
	 *
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing HUGEPD* at this top level?  Because
	 * often there will be no work to do at all, and we'd prefer
	 * not to go all the way down to the bottom just to discover
	 * that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we
	 * must be careful to reject "the opposite 0" before it
	 * confuses the subsequent tests.  But what about where end is
	 * brought down by HUGEPD_SIZE below? no, end can't go down to
	 * 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
	unsigned int psize = get_slice_psize(tlb->mm, addr);

	addr &= HUGEPD_MASK(psize);
	if (addr < floor) {
		addr += HUGEPD_SIZE(psize);
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= HUGEPD_MASK(psize);
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= HUGEPD_SIZE(psize);
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset(tlb->mm, addr);
	do {
		psize = get_slice_psize(tlb->mm, addr);
		BUG_ON(!mmu_huge_psizes[psize]);
		next = pgd_addr_end(addr, end);
		if (mmu_psize_to_shift(psize) < PUD_SHIFT) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
			if (pgd_none(*pgd))
				continue;
			free_hugepte_range(tlb, (hugepd_t *)pgd, psize);
		}
	} while (pgd++, addr = next, addr != end);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_need_flush (huge / !huge). Might not be
		 * necessary anymore if we make hpte_need_flush() get the
		 * page size from the slices
		 */
		unsigned int psize = get_slice_psize(mm, addr);
		unsigned int shift = mmu_psize_to_shift(psize);
		unsigned long sz = ((1UL) << shift);
		struct hstate *hstate = size_to_hstate(sz);
		pte_update(mm, addr & hstate->mask, ptep, ~0UL, 1);
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
	return __pte(old);
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;
	unsigned int mmu_psize = get_slice_psize(mm, address);

	/* Verify it is a huge page else bail. */
	if (!mmu_huge_psizes[mmu_psize])
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page) {
		unsigned int shift = mmu_psize_to_shift(mmu_psize);
		unsigned long sz = ((1UL) << shift);
		page += (address % sz) / PAGE_SIZE;
	}

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}


unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	if (!mmu_huge_psizes[mmu_psize])
		return -EINVAL;
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
					pte_t pte, int trap, unsigned long sz)
{
	struct page *page;
	int i;

	if (!pfn_valid(pte_pfn(pte)))
		return rflags;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			for (i = 0; i < (sz / PAGE_SIZE); i++)
				__flush_dcache_icache(page_address(page+i));
			set_bit(PG_arch_1, &page->flags);
		} else {
			rflags |= HPTE_R_N;
		}
	}
	return rflags;
}

int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local,
		   unsigned long trap)
{
	pte_t *ptep;
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa, sz;
	long slot;
	int err = 1;
	int ssize = user_segment_size(ea);
	unsigned int mmu_psize;
	int shift;
	mmu_psize = get_slice_psize(mm, ea);

	if (!mmu_huge_psizes[mmu_psize])
		goto out;
	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = hpt_va(ea, vsid, ssize);

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;

	/* 
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is 
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY. 
	 */


	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
	} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
					 old_pte, new_pte));

	rflags = 0x2 | (!(new_pte & _PAGE_RW));
	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
	shift = mmu_psize_to_shift(mmu_psize);
	sz = ((1UL) << shift);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case */
		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
						       trap, sz);

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, shift, ssize);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize,
					 ssize, local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, shift, ssize);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot information in new PTE */
#ifdef CONFIG_PPC_64K_PAGES
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
#else
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
#endif
		/* Add in WIMG bits */
		rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				      _PAGE_COHERENT | _PAGE_GUARDED));

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_psize, ssize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_psize, ssize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP)&~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
	}

	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

 out:
	return err;
}

static void __init set_huge_psize(int psize)
{
	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable limits. */
	if (mmu_psize_defs[psize].shift &&
		mmu_psize_defs[psize].shift < SID_SHIFT_1T &&
		(mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
		 mmu_psize_defs[psize].shift == PAGE_SHIFT_64K ||
		 mmu_psize_defs[psize].shift == PAGE_SHIFT_16G)) {
		/* Return if huge page size has already been set up or is the
		 * same as the base page size. */
		if (mmu_huge_psizes[psize] ||
		   mmu_psize_defs[psize].shift == PAGE_SHIFT)
			return;
		hugetlb_add_hstate(mmu_psize_defs[psize].shift - PAGE_SHIFT);

		switch (mmu_psize_defs[psize].shift) {
		case PAGE_SHIFT_64K:
		    /* We only allow 64k hpages with 4k base page,
		     * which was checked above, and always put them
		     * at the PMD */
		    hugepte_shift[psize] = PMD_SHIFT;
		    break;
		case PAGE_SHIFT_16M:
		    /* 16M pages can be at two different levels
		     * of page tables based on base page size */
		    if (PAGE_SHIFT == PAGE_SHIFT_64K)
			    hugepte_shift[psize] = PMD_SHIFT;
		    else /* 4k base page */
			    hugepte_shift[psize] = PUD_SHIFT;
		    break;
		case PAGE_SHIFT_16G:
		    /* 16G pages are always at PGD level */
		    hugepte_shift[psize] = PGDIR_SHIFT;
		    break;
		}
		hugepte_shift[psize] -= mmu_psize_defs[psize].shift;
	} else
		hugepte_shift[psize] = 0;
}

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;
	int mmu_psize;
	int shift;

	size = memparse(str, &str);

	shift = __ffs(size);
	mmu_psize = shift_to_mmu_psize(shift);
	if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift)
		set_huge_psize(mmu_psize);
	else
		printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
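/*
 * Example (illustrative, not from the original source): booting with
 * "hugepagesz=16M hugepages=16" runs hugepage_setup_sz() for the 16M
 * size, which registers MMU_PAGE_16M via set_huge_psize(); the generic
 * hugetlb code then reserves the 16 pages requested by "hugepages=".
 */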

static int __init hugetlbpage_init(void)
{
	unsigned int psize;

	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -ENODEV;

	/* Add supported huge page sizes.  Need to change HUGE_MAX_HSTATE
	 * and adjust PTE_NONCACHE_NUM if the number of supported huge page
	 * sizes changes.
	 */
	set_huge_psize(MMU_PAGE_16M);
	set_huge_psize(MMU_PAGE_16G);

	/* Temporarily disable support for 64K huge pages when 64K SPU local
	 * store support is enabled as the current implementation conflicts.
	 */
#ifndef CONFIG_SPU_FS_64K_LS
	set_huge_psize(MMU_PAGE_64K);
#endif

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		if (mmu_huge_psizes[psize]) {
			pgtable_cache[HUGE_PGTABLE_INDEX(psize)] =
				kmem_cache_create(
					HUGEPTE_CACHE_NAME(psize),
					HUGEPTE_TABLE_SIZE(psize),
					HUGEPTE_TABLE_SIZE(psize),
					0,
					NULL);
			if (!pgtable_cache[HUGE_PGTABLE_INDEX(psize)])
				panic("hugetlbpage_init(): could not create %s"\
				      "\n", HUGEPTE_CACHE_NAME(psize));
		}
	}

	return 0;
}

module_init(hugetlbpage_init);