/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

unsigned int HPAGE_SHIFT;

/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On 64-bit implementations, this is
 * just used to track 16G pages and so is a single array.  32-bit
 * implementations may have more than one gpage size due to limitations
 * of the memory allocators, so we need multiple arrays.
 */
#ifdef CONFIG_PPC64
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#else
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
	u64 gpage_list[MAX_NUMBER_GPAGES];
	unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#endif

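/*
 * Translate between a page-size shift and the index into
 * mmu_psize_defs[]; returns -1 if no MMU page size matches the shift.
 */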
static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

#define hugepd_none(hpd)	((hpd).pd == 0)

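/*
 * Walk the page tables for @ea, descending into hugepage directories
 * (hugepd) wherever they appear.  Returns a pointer to the PTE mapping
 * @ea, or NULL if there is none.  If @shift is non-NULL it is set to
 * the page-size shift of a huge mapping, or 0 for a normal PTE.
 */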
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	pg = pgdir + pgd_index(ea);
	if (is_hugepd(pg)) {
		hpdp = (hugepd_t *)pg;
	} else if (!pgd_none(*pg)) {
		pdshift = PUD_SHIFT;
		pu = pud_offset(pg, ea);
		if (is_hugepd(pu))
			hpdp = (hugepd_t *)pu;
		else if (!pud_none(*pu)) {
			pdshift = PMD_SHIFT;
			pm = pmd_offset(pu, ea);
			if (is_hugepd(pm))
				hpdp = (hugepd_t *)pm;
			else if (!pmd_none(*pm)) {
				return pte_offset_kernel(pm, ea);
			}
		}
	}

	if (!hpdp)
		return NULL;

	if (shift)
		*shift = hugepd_shift(*hpdp);
	return hugepte_offset(hpdp, ea, pdshift);
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}

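/*
 * Allocate a hugepage directory (the table of hugepage PTEs below a
 * pgd/pud/pmd entry) and link it in at @hpdp, racing safely against a
 * concurrent allocator under mm->page_table_lock.
 */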
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	struct kmem_cache *cachep;
	pte_t *new;

#ifdef CONFIG_PPC64
	cachep = PGT_CACHE(pdshift - pshift);
#else
	int i;
	int num_hugepd = 1 << (pshift - pdshift);
	cachep = hugepte_cache;
#endif

	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC64
	if (!hugepd_none(*hpdp))
		kmem_cache_free(cachep, new);
	else
		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#else
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		else
			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1 ; i >= 0; i--, hpdp--)
			hpdp->pd = 0;
		kmem_cache_free(cachep, new);
	}
#endif
	spin_unlock(&mm->page_table_lock);
	return 0;
}

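/*
 * Find, or create if necessary, the hugepage directory covering @addr
 * for a huge page of size @sz, and return a pointer to the hugepte
 * slot for @addr within it.  The level the hugepd lives at is chosen
 * from the page size alone.
 */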
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);

	pg = pgd_offset(mm, addr);
	if (pshift >= PUD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= PMD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}

	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(hpdp, addr, pdshift);
}

#ifdef CONFIG_PPC32
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
	int i;

	if (addr == 0)
		return;

	gpage_freearray[idx].nr_gpages = number_of_pages;

	for (i = 0; i < number_of_pages; i++) {
		gpage_freearray[idx].gpage_list[i] = addr;
		addr += page_size;
	}
}

/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	int idx = shift_to_mmu_psize(hstate->order + PAGE_SHIFT);
	int nr_gpages = gpage_freearray[idx].nr_gpages;

	if (nr_gpages == 0)
		return 0;

#ifdef CONFIG_HIGHMEM
	/*
	 * If gpages can be in highmem we can't use the trick of storing the
	 * data structure in the page; allocate space for this
	 */
	m = alloc_bootmem(sizeof(struct huge_bootmem_page));
	m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
	m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

	list_add(&m->list, &huge_boot_pages);
	gpage_freearray[idx].nr_gpages = nr_gpages;
	gpage_freearray[idx].gpage_list[nr_gpages] = 0;
	m->hstate = hstate;

	return 1;
}
/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */

unsigned long gpage_npages[MMU_PAGE_COUNT];

static int __init do_gpage_early_setup(char *param, char *val)
{
	static phys_addr_t size;
	unsigned long npages;

	/*
	 * The hugepagesz and hugepages cmdline options are interleaved.  We
	 * use the size variable to keep track of whether or not this was done
	 * properly and skip over instances where it is incorrect.  Other
	 * command-line parsing code will issue warnings, so we don't need to.
	 */
	if ((strcmp(param, "default_hugepagesz") == 0) ||
	    (strcmp(param, "hugepagesz") == 0)) {
		size = memparse(val, NULL);
	} else if (strcmp(param, "hugepages") == 0) {
		if (size != 0) {
			if (sscanf(val, "%lu", &npages) <= 0)
				npages = 0;
			gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
			size = 0;
		}
	}
	return 0;
}

/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the lmb
 * allocate to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
	static __initdata char cmdline[COMMAND_LINE_SIZE];
	phys_addr_t size, base;
	int i;

	strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_args("hugetlb gpages", cmdline, NULL, 0, &do_gpage_early_setup);

	/*
	 * Walk gpage list in reverse, allocating larger page sizes first.
	 * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
	 * When we reach the point in the list where pages are no longer
	 * considered gpages, we're done.
	 */
	for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
		if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
			continue;
		else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
			break;

		size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
		base = memblock_alloc_base(size * gpage_npages[i], size,
					   MEMBLOCK_ALLOC_ANYWHERE);
		add_gpage(base, size, gpage_npages[i]);
	}
}

#else /* PPC64 */

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif

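/* PMD page-table sharing is not implemented on powerpc. */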
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

#ifdef CONFIG_PPC32
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head	rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(hugepte_cache, batch->ptes[i]);

	free_page((unsigned long)batch);
}

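/*
 * Free a hugepte table, deferring via RCU when other CPUs might still
 * be walking the page tables locklessly.  Frees are batched per CPU;
 * if no other CPU can be using this mm, free immediately instead.
 */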
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &__get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm),
			  cpumask_of(smp_processor_id()))) {
		kmem_cache_free(hugepte_cache, hugepte);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
}
#endif

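/*
 * Clear the hugepd entry (or entries, on 32-bit) at @hpdp and free the
 * hugepte table it points to, but only if the range being torn down
 * covers the whole table within the floor/ceiling limits.
 */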
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC64
	unsigned int shift = hugepd_shift(*hpdp);
#else
	/* Note: On 32-bit the hpdp may be the first of several */
	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#endif

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		hpdp->pd = 0;

	tlb->need_flush = 1;
#ifdef CONFIG_PPC64
	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#else
	hugepd_free(tlb, hugepte);
#endif
}

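/*
 * Walk the PMDs below @pud, freeing any hugepage directories found,
 * then free the PMD page itself if the floor/ceiling limits allow.
 */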
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!is_hugepd(pud)) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(pgd)) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
#ifdef CONFIG_PPC32
			/*
			 * Increment next by the size of the huge mapping since
			 * on 32-bit there may be more than one entry at the pgd
			 * level for a single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}

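/*
 * Translate a hugepage-mapped address into its struct page, offset to
 * the base page containing @address; returns -EINVAL if the address is
 * not mapped by a hugepage.
 */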
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;
	unsigned shift;
	unsigned long mask;

	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

	/* Verify it is a huge page else bail. */
	if (!ptep || !shift)
		return ERR_PTR(-EINVAL);

	mask = (1UL << shift) - 1;
	page = pte_page(*ptep);
	if (page)
		page += (address & mask) / PAGE_SIZE;

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

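/*
 * Lockless get_user_pages() fast path for a huge PTE: check the access
 * permissions, speculatively take references on the compound page, then
 * recheck that the PTE did not change underneath us and back out if it
 * did.
 */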
static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page, *tail;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = *ptep;
	mask = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}

int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
	       unsigned long addr, unsigned long end,
	       int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(*hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
#ifdef CONFIG_PPC_MM_SLICES
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
#else
	return get_unmapped_area(file, addr, len, pgoff, flags);
#endif
}

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
#else
	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	return huge_page_size(hstate_vma(vma));
#endif
}

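/*
 * FSL Book3E TLB entries encode their size as a power of 4, so huge
 * page sizes on those parts must be powers of 4 as well.
 */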
static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) ? false : true;
	return false;
}

static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if ((size < PAGE_SIZE) || !is_power_of_4(size))
		return -EINVAL;
#else
	if (!is_power_of_2(size)
	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
		return -EINVAL;
#endif

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

#ifdef CONFIG_SPU_FS_64K_LS
	/* Disable support for 64K huge pages when 64K SPU local store
	 * support is enabled as the current implementation conflicts.
	 */
	if (shift == PAGE_SHIFT_64K)
		return -EINVAL;
#endif /* CONFIG_SPU_FS_64K_LS */

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been setup */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0)
		printk(KERN_WARNING "Invalid huge page size specified (%llu)\n", size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

#ifdef CONFIG_FSL_BOOKE
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		/* Don't treat normal page sizes as huge... */
		if (shift != PAGE_SHIFT)
			if (add_huge_page_size(1ULL << shift) < 0)
				continue;
	}

	/*
	 * Create a kmem cache for hugeptes.  The bottom bits in the pte have
	 * size information encoded in them, so align them to allow this
	 */
	hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
					   HUGEPD_SHIFT_MASK + 1, 0, NULL);
	if (hugepte_cache == NULL)
		panic("%s: Unable to create kmem cache for hugeptes\n",
		      __func__);

	/* Default hpage size = 4M */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else
		panic("%s: Unable to set default huge page size\n", __func__);

	return 0;
}
#else
static int __init hugetlbpage_init(void)
{
	int psize;

	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;

		if (shift < PMD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PUD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;

		pgtable_cache_add(pdshift - shift, NULL);
		if (!PGT_CACHE(pdshift - shift))
			panic("hugetlbpage_init(): could not create "
			      "pgtable cache for %d bit pagesize\n", shift);
	}

	/* Set default large page size. Currently, we pick 16M or 1M
	 * depending on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

	return 0;
}
#endif
module_init(hugetlbpage_init);

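/*
 * Flush the data and instruction caches for every base page of a
 * compound hugepage, temporarily kmapping pages that live in highmem.
 */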
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i, KM_PPC_SYNC_ICACHE);
			__flush_dcache_icache(start);
			kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
		}
	}
}