hugetlbpage.c
/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/spu.h>

#define HPAGE_SHIFT_64K	16
#define HPAGE_SHIFT_16M	24

#define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)

unsigned int hugepte_shift;
#define PTRS_PER_HUGEPTE	(1 << hugepte_shift)
#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << hugepte_shift)

#define HUGEPD_SHIFT		(HPAGE_SHIFT + hugepte_shift)
#define HUGEPD_SIZE		(1UL << HUGEPD_SHIFT)
#define HUGEPD_MASK		(~(HUGEPD_SIZE-1))

#define huge_pgtable_cache	(pgtable_cache[HUGEPTE_CACHE_NUM])

/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */
#define HUGEPD_OK	0x1

typedef struct { unsigned long pd; } hugepd_t;

#define hugepd_none(hpd)	((hpd).pd == 0)

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!(hpd.pd & HUGEPD_OK));
	return (pte_t *)(hpd.pd & ~HUGEPD_OK);
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr)
{
	unsigned long idx = ((addr >> HPAGE_SHIFT) & (PTRS_PER_HUGEPTE-1));
	pte_t *dir = hugepd_page(*hpdp);

	return dir + idx;
}

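/* Allocate a new hugepte table and hook it into *hpdp.  If another
 * thread installs a table first (checked under mm->page_table_lock),
 * free ours and use theirs instead. */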
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address)
{
	pte_t *new = kmem_cache_alloc(huge_pgtable_cache,
				      GFP_KERNEL|__GFP_REPEAT);

	if (! new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!hugepd_none(*hpdp))
		kmem_cache_free(huge_pgtable_cache, new);
	else
		hpdp->pd = (unsigned long)new | HUGEPD_OK;
	spin_unlock(&mm->page_table_lock);
	return 0;
}

/* Base page size affects how we walk hugetlb page tables */
#ifdef CONFIG_PPC_64K_PAGES
#define hpmd_offset(pud, addr)		pmd_offset(pud, addr)
#define hpmd_alloc(mm, pud, addr)	pmd_alloc(mm, pud, addr)
#else
static inline
pmd_t *hpmd_offset(pud_t *pud, unsigned long addr)
{
	if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
		return pmd_offset(pud, addr);
	else
		return (pmd_t *) pud;
}
static inline
pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr)
{
	if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
		return pmd_alloc(mm, pud, addr);
	else
		return (pmd_t *) pud;
}
#endif

/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;

	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, addr);
		if (!pud_none(*pu)) {
			pm = hpmd_offset(pu, addr);
			if (!pmd_none(*pm))
				return hugepte_offset((hugepd_t *)pm, addr);
		}
	}

	return NULL;
}

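/* Walk, allocating any missing levels on the way down, to the hugepd
 * entry covering the address, then return a pointer to its hugepte. */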
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;

	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	pu = pud_alloc(mm, pg, addr);

	if (pu) {
		pm = hpmd_alloc(mm, pu, addr);
		if (pm)
			hpdp = (hugepd_t *)pm;
	}

	if (! hpdp)
		return NULL;

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr))
		return NULL;

	return hugepte_offset(hpdp, addr);
}

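/* Huge pmd sharing is not implemented on powerpc, so there is never
 * anything to unshare. */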
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

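/* Unhook a hugepte table from its hugepd entry and queue it for
 * freeing once the TLB has been flushed. */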
static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
{
	pte_t *hugepte = hugepd_page(*hpdp);

	hpdp->pd = 0;
	tlb->need_flush = 1;
	pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM,
						 PGF_CACHENUM_MASK));
}

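/* Free the hugepte tables hanging off a range of pmd entries, then
 * free the pmd table itself if the floor/ceiling limits show nothing
 * else still needs it (modelled on free_pmd_range()). */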
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}

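/* Same as above, one level up: free the hugepte (or pmd) tables below
 * a range of pud entries, then the pud table itself if possible. */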
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
#ifdef CONFIG_PPC_64K_PAGES
		if (pud_none_or_clear_bad(pud))
			continue;
		hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
#else
		if (HPAGE_SHIFT == HPAGE_SHIFT_64K) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
		} else {
			if (pud_none(*pud))
				continue;
			free_hugepte_range(tlb, (hugepd_t *)pud);
		}
#endif
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * Comments below take from the normal free_pgd_range().  They
	 * apply here too.  The tests against HUGEPD_MASK below are
	 * essential, because we *don't* test for this at the bottom
	 * level.  Without them we'll attempt to free a hugepte table
	 * when we unmap just part of it, even if there are other
	 * active mappings using it.
	 *
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing HUGEPD* at this top level?  Because
	 * often there will be no work to do at all, and we'd prefer
	 * not to go all the way down to the bottom just to discover
	 * that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we
	 * must be careful to reject "the opposite 0" before it
	 * confuses the subsequent tests.  But what about where end is
	 * brought down by HUGEPD_SIZE below? no, end can't go down to
	 * 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= HUGEPD_MASK;
	if (addr < floor) {
		addr += HUGEPD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= HUGEPD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= HUGEPD_SIZE;
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset(tlb->mm, addr);
	do {
		BUG_ON(get_slice_psize(tlb->mm, addr) != mmu_huge_psize);
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

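/* Install a huge page pte, flushing any previous translation first. */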
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_need_flush (huge / !huge). Might not be
		 * necessary anymore if we make hpte_need_flush() get the
		 * page size from the slices
		 */
		pte_update(mm, addr & HPAGE_MASK, ptep, ~0UL, 1);
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}

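/* Atomically clear a huge page pte and return its previous value. */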
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
	return __pte(old);
}

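/* Translate a user address inside a huge page mapping into the
 * struct page backing it. */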
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;

	if (get_slice_psize(mm, address) != mmu_huge_psize)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page)
		page += (address % HPAGE_SIZE) / PAGE_SIZE;

	return page;
}

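/* Huge pages are reached via hugepd tables, never directly through a
 * pmd entry, so a pmd itself never maps a huge page here. */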
int pmd_huge(pmd_t pmd)
{
	return 0;
}

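/* Never reached, since pmd_huge() always returns 0 on this platform. */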
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}


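/* Defer placement of huge page mappings to the slice code, which
 * tracks which address ranges use the huge page size. */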
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       mmu_huge_psize, 1, 0);
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
						  pte_t pte, int trap)
{
	struct page *page;
	int i;

	if (!pfn_valid(pte_pfn(pte)))
		return rflags;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
				__flush_dcache_icache(page_address(page+i));
			set_bit(PG_arch_1, &page->flags);
		} else {
			rflags |= HPTE_R_N;
		}
	}
	return rflags;
}

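/*
 * Handle a hash fault on a huge page: build or update the hardware
 * hash table entry (HPTE) for the mapping.  Returns 0 on success, or
 * 1 to pass the fault on to do_page_fault().
 */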
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local,
		   unsigned long trap)
{
	pte_t *ptep;
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa;
	long slot;
	int err = 1;
	int ssize = user_segment_size(ea);

	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = hpt_va(ea, vsid, ssize);

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;

	/* 
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is 
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY. 
	 */


	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
	} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
					 old_pte, new_pte));

	rflags = 0x2 | (!(new_pte & _PAGE_RW));
 	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case */
		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
						       trap);

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, HPAGE_SHIFT, ssize);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
					 ssize, local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, HPAGE_SHIFT, ssize);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot information in new PTE */
#ifdef CONFIG_PPC_64K_PAGES
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
#else
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
#endif
		/* Add in WIMG bits */
		rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				      _PAGE_COHERENT | _PAGE_GUARDED));

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_huge_psize, ssize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL; 
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_huge_psize, ssize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP)&~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
                        }
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
	}

	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

 out:
	return err;
}

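/* Record the MMU page size to be used for huge pages and derive the
 * hugepte table geometry from it. */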
void set_huge_psize(int psize)
{
	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable limits. */
	if (mmu_psize_defs[psize].shift && mmu_psize_defs[psize].shift < SID_SHIFT &&
		(mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
			mmu_psize_defs[psize].shift == HPAGE_SHIFT_64K)) {
		HPAGE_SHIFT = mmu_psize_defs[psize].shift;
		mmu_huge_psize = psize;
#ifdef CONFIG_PPC_64K_PAGES
		hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT);
#else
		if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
			hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT);
		else
			hugepte_shift = (PUD_SHIFT-HPAGE_SHIFT);
#endif

	} else
		HPAGE_SHIFT = 0;
}

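/* Parse the "hugepagesz=" kernel command line option. */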
static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;
	int mmu_psize = -1;
	int shift;

	size = memparse(str, &str);

	shift = __ffs(size);
	switch (shift) {
#ifndef CONFIG_PPC_64K_PAGES
	case HPAGE_SHIFT_64K:
		mmu_psize = MMU_PAGE_64K;
		break;
#endif
	case HPAGE_SHIFT_16M:
		mmu_psize = MMU_PAGE_16M;
		break;
	}

	if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift)
		set_huge_psize(mmu_psize);
	else
		printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

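/* Constructor for the hugepte cache: tables must start out zeroed. */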
static void zero_ctor(struct kmem_cache *cache, void *addr)
{
	memset(addr, 0, kmem_cache_size(cache));
}

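/* Create the kmem cache for hugepte tables.  Huge pages need a CPU
 * with 16M page support. */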
static int __init hugetlbpage_init(void)
{
	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -ENODEV;

	huge_pgtable_cache = kmem_cache_create("hugepte_cache",
					       HUGEPTE_TABLE_SIZE,
					       HUGEPTE_TABLE_SIZE,
					       0,
					       zero_ctor);
	if (! huge_pgtable_cache)
		panic("hugetlbpage_init(): could not create hugepte cache\n");

	return 0;
}

module_init(hugetlbpage_init);