/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/spu.h>

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

#define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
#define MAX_NUMBER_GPAGES	1024

/* Tracks the 16G pages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  */
static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;

/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */

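/* Reverse-map a page shift to the matching index in mmu_psize_defs[];
 * returns -1 if no supported MMU page size uses that shift. */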
static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

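/* A hugepd entry packs the address of a hugepte table (top address
 * bit cleared, restored by hugepd_page()) together with the page
 * shift of the mapping in the low HUGEPD_SHIFT_MASK bits. */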
#define hugepd_none(hpd)	((hpd).pd == 0)

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | 0xc000000000000000);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & HUGEPD_SHIFT_MASK;
}

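/* Index into a hugepte table: the address bits below pdshift, taken
 * in units of the hugepage size, select the entry. */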
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, unsigned pdshift)
{
	unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
	pte_t *dir = hugepd_page(*hpdp);

	return dir + idx;
}

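/* Walk the page tables for 'ea', stopping early if a hugepd entry is
 * found at any level.  Returns the pte pointer (with *shift set to
 * the hugepage shift for a hugepte, or left at 0 for a normal pte),
 * or NULL if nothing is mapped. */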
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	pg = pgdir + pgd_index(ea);
	if (is_hugepd(pg)) {
		hpdp = (hugepd_t *)pg;
	} else if (!pgd_none(*pg)) {
		pdshift = PUD_SHIFT;
		pu = pud_offset(pg, ea);
		if (is_hugepd(pu))
			hpdp = (hugepd_t *)pu;
		else if (!pud_none(*pu)) {
			pdshift = PMD_SHIFT;
			pm = pmd_offset(pu, ea);
			if (is_hugepd(pm))
				hpdp = (hugepd_t *)pm;
			else if (!pmd_none(*pm)) {
				return pte_offset_map(pm, ea);
			}
		}
	}

	if (!hpdp)
		return NULL;

	if (shift)
		*shift = hugepd_shift(*hpdp);
	return hugepte_offset(hpdp, ea, pdshift);
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}

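/* Allocate and zero a hugepte table and install it in the given
 * directory entry, handling a racing allocation under
 * mm->page_table_lock. */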
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	pte_t *new = kmem_cache_zalloc(PGT_CACHE(pdshift - pshift),
				       GFP_KERNEL|__GFP_REPEAT);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!hugepd_none(*hpdp))
		kmem_cache_free(PGT_CACHE(pdshift - pshift), new);
	else
		hpdp->pd = ((unsigned long)new & ~0x8000000000000000) | pshift;
	spin_unlock(&mm->page_table_lock);
	return 0;
}

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);

	pg = pgd_offset(mm, addr);
	if (pshift >= PUD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= PMD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}

	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(hpdp, addr, pdshift);
}

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(unsigned long addr, unsigned long page_size,
	unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}

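/* PowerPC does not share hugepage pmds, so there is never anything
 * to unshare. */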
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

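/* Tear down the hugepte table hanging off one hugepd entry, but only
 * if the region being freed covers the entry's whole span and stays
 * within the floor/ceiling limits. */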
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	unsigned shift = hugepd_shift(*hpdp);
	unsigned long pdmask = ~((1UL << pdshift) - 1);

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	hpdp->pd = 0;
	tlb->need_flush = 1;
	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!is_hugepd(pud)) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (!is_hugepd(pgd)) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (pgd++, addr = next, addr != end);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_need_flush (huge / !huge). Might not be
		 * necessary anymore if we make hpte_need_flush() get the
		 * page size from the slices
		 */
		pte_update(mm, addr, ptep, ~0UL, 1);
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
	return __pte(old);
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;
	unsigned shift;
	unsigned long mask;

	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

	/* Verify it is a huge page else bail. */
	if (!ptep || !shift)
		return ERR_PTR(-EINVAL);

	mask = (1UL << shift) - 1;
	page = pte_page(*ptep);
	if (page)
		page += (address & mask) / PAGE_SIZE;

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

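/* Lockless get_user_pages_fast() helper: take speculative references
 * on the subpages covered by one hugepte, then recheck that the pte
 * did not change underneath us. */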
static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = *ptep;
	mask = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* The pte changed under us: drop the speculative
		 * references again and bail out. */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
	       unsigned long addr, unsigned long end,
	       int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(*hugepd);

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr += sz, addr != end);

	return 1;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
}

/*
 * Called by __hash_page_huge() below for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
					pte_t pte, int trap, unsigned long sz)
{
	struct page *page;
	int i;

	if (!pfn_valid(pte_pfn(pte)))
		return rflags;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			for (i = 0; i < (sz / PAGE_SIZE); i++)
				__flush_dcache_icache(page_address(page+i));
			set_bit(PG_arch_1, &page->flags);
		} else {
			rflags |= HPTE_R_N;
		}
	}
	return rflags;
}

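/* Hash-fault handling for hugepages: build or update the hash PTE
 * that backs a hugepage Linux pte when a hash fault hits a hugepage
 * mapping. */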
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, int local, int ssize,
		     unsigned int shift, unsigned int mmu_psize)
{
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa, sz;
	long slot;
	int err = 1;

	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);

	/* Search the Linux page table for a match with va */
	va = hpt_va(ea, vsid, ssize);

	/* 
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is 
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY. 
	 */


	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
	} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
					 old_pte, new_pte));

	rflags = 0x2 | (!(new_pte & _PAGE_RW));
 	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
	sz = ((1UL) << shift);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case */
		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
						       trap, sz);

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, shift, ssize);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize,
					 ssize, local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, shift, ssize);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot information in new PTE */
#ifdef CONFIG_PPC_64K_PAGES
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
#else
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
#endif
		/* Add in WIMG bits */
		rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				      _PAGE_COHERENT | _PAGE_GUARDED));

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_psize, ssize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL; 
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_psize, ssize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP)&~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
	}

	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

 out:
	return err;
}

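/* Validate a hugepage size (from the command line or from MMU feature
 * probing in hugetlbpage_init() below) and register an hstate for it. */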
static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
	if (!is_power_of_2(size)
	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
		return -EINVAL;

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

#ifdef CONFIG_SPU_FS_64K_LS
	/* Disable support for 64K huge pages when 64K SPU local store
	 * support is enabled as the current implementation conflicts.
	 */
	if (shift == PAGE_SHIFT_64K)
		return -EINVAL;
#endif /* CONFIG_SPU_FS_64K_LS */

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been setup */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0)
		printk(KERN_WARNING "Invalid huge page size specified (%llu)\n", size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

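/* Register every hugepage size the MMU supports and create pgtable
 * caches for the corresponding hugepte tables. */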
static int __init hugetlbpage_init(void)
{
	int psize;

	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;

		if (shift < PMD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PUD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;

		pgtable_cache_add(pdshift - shift, NULL);
		if (!PGT_CACHE(pdshift - shift))
			panic("hugetlbpage_init(): could not create "
			      "pgtable cache for %d bit pagesize\n", shift);
	}

	/* Set default large page size. Currently, we pick 16M or 1M
	 * depending on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

	return 0;
}

module_init(hugetlbpage_init);