/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>

#ifdef CONFIG_HUGETLB_PAGE

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34
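/*
 * These shifts are simply log2 of the page sizes: 1UL << 16 = 64K,
 * 1UL << 24 = 16M, 1UL << 34 = 16G.
 */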

unsigned int HPAGE_SHIFT;

/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On non-Freescale implementations, this is
 * just used to track 16G pages and so is a single array.  FSL-based
 * implementations may have more than one gpage size, so we need multiple
 * arrays.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
	u64 gpage_list[MAX_NUMBER_GPAGES];
	unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif

#define hugepd_none(hpd)	((hpd).pd == 0)
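/*
 * A hugepd entry is "none" only when the raw pd word is zero; any other
 * value is a normal next-level pointer, a leaf pte or a hugepd pointer
 * (see the four-case comment at the end of this file).
 */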

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */

/*
 * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
 * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD;
 *
 * Defined in such a way that we can optimize away code block at build time
 * if CONFIG_HUGETLB_PAGE=n.
 */
int pmd_huge(pmd_t pmd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pmd_val(pmd) & 0x3) != 0x0);
}

int pud_huge(pud_t pud)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pud_val(pud) & 0x3) != 0x0);
}

int pgd_huge(pgd_t pgd)
{
	/*
	 * leaf pte for huge page, bottom two bits != 00
	 */
	return ((pgd_val(pgd) & 0x3) != 0x0);
}
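/*
 * Illustration of the leaf test above, with a made-up value: a pointer
 * to a next-level table is aligned, e.g. 0xc000000001f0f000 & 0x3 == 0,
 * while a huge leaf pte carries flag bits in the low bits of the word,
 * so (val & 0x3) != 0 identifies it.
 */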

#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_DEBUG_VM)
/*
 * This enables us to catch the wrong page directory format
 * Moved here so that we can use WARN() in the call.
 */
int hugepd_ok(hugepd_t hpd)
{
	bool is_hugepd;

	/*
	 * We should not find this format in page directory, warn otherwise.
	 */
	is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
	WARN(is_hugepd, "Found wrong page directory format\n");
	return 0;
}
#endif

#else
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

int pgd_huge(pgd_t pgd)
{
	return 0;
}
#endif

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	/* Only called for hugetlbfs pages, hence can ignore THP */
	return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}

static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	struct kmem_cache *cachep;
	pte_t *new;

#ifdef CONFIG_PPC_FSL_BOOK3E
	int i;
	int num_hugepd = 1 << (pshift - pdshift);
	cachep = hugepte_cache;
#else
	cachep = PGT_CACHE(pdshift - pshift);
#endif

	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		else
			/* We use the old format for PPC_FSL_BOOK3E */
			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1 ; i >= 0; i--, hpdp--)
			hpdp->pd = 0;
		kmem_cache_free(cachep, new);
	}
#else
	if (!hugepd_none(*hpdp))
		kmem_cache_free(cachep, new);
	else {
#ifdef CONFIG_PPC_BOOK3S_64
		hpdp->pd = (unsigned long)new |
			    (shift_to_mmu_psize(pshift) << 2);
#else
		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
	}
#endif
	spin_unlock(&mm->page_table_lock);
	return 0;
}
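/*
 * Sketch of the FSL multi-entry case handled above, assuming a 32-bit
 * FSL Book3E layout with PGDIR_SHIFT == 22 (the exact shift is an
 * assumption for the example): a 16M page has pshift == 24, so
 * num_hugepd == 1 << (24 - 22) == 4, and four consecutive pgd entries
 * are all pointed at the same hugepte table.
 */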

/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif
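/*
 * Concretely, huge_pte_alloc() below installs the hugepd in the pgd once
 * pshift >= HUGEPD_PGD_SHIFT and in the pud once pshift >=
 * HUGEPD_PUD_SHIFT, so on FSL the thresholds are PGDIR_SHIFT/PUD_SHIFT
 * and elsewhere PUD_SHIFT/PMD_SHIFT.
 */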

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);
	pg = pgd_offset(mm, addr);

	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *) pg;
	else if (pshift > PUD_SHIFT)
		/*
		 * We need to use hugepd table
		 */
		hpdp = (hugepd_t *)pg;
	else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT)
			hpdp = (hugepd_t *)pu;
		else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (pshift == PMD_SHIFT)
				/* 16MB hugepage */
				return (pte_t *)pm;
			else
				hpdp = (hugepd_t *)pm;
		}
	}
	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}

#else

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);

	pg = pgd_offset(mm, addr);

	if (pshift >= HUGEPD_PGD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= HUGEPD_PUD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}

	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}
#endif

#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
	int i;

	if (addr == 0)
		return;

	gpage_freearray[idx].nr_gpages = number_of_pages;

	for (i = 0; i < number_of_pages; i++) {
		gpage_freearray[idx].gpage_list[i] = addr;
		addr += page_size;
	}
}
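/*
 * Hypothetical usage sketch (address and size made up, not from a real
 * device tree):
 *
 *	add_gpage(0x100000000ULL, 1ULL << 30, 2);
 *
 * records two 1G gpages, at 4G and 5G, in
 * gpage_freearray[shift_to_mmu_psize(30)].
 */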

/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	int idx = shift_to_mmu_psize(huge_page_shift(hstate));
	int nr_gpages = gpage_freearray[idx].nr_gpages;

	if (nr_gpages == 0)
		return 0;

#ifdef CONFIG_HIGHMEM
	/*
	 * If gpages can be in highmem we can't use the trick of storing the
	 * data structure in the page; allocate space for this
	 */
	m = memblock_virt_alloc(sizeof(struct huge_bootmem_page), 0);
	m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
	m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

	list_add(&m->list, &huge_boot_pages);
	gpage_freearray[idx].nr_gpages = nr_gpages;
	gpage_freearray[idx].gpage_list[nr_gpages] = 0;
	m->hstate = hstate;

	return 1;
}
/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */

unsigned long gpage_npages[MMU_PAGE_COUNT];

static int __init do_gpage_early_setup(char *param, char *val,
				       const char *unused, void *arg)
{
	static phys_addr_t size;
	unsigned long npages;

	/*
	 * The hugepagesz and hugepages cmdline options are interleaved.  We
	 * use the size variable to keep track of whether or not this was done
	 * properly and skip over instances where it is incorrect.  Other
	 * command-line parsing code will issue warnings, so we don't need to.
	 */
	if ((strcmp(param, "default_hugepagesz") == 0) ||
	    (strcmp(param, "hugepagesz") == 0)) {
		size = memparse(val, NULL);
	} else if (strcmp(param, "hugepages") == 0) {
		if (size != 0) {
			if (sscanf(val, "%lu", &npages) <= 0)
				npages = 0;
			if (npages > MAX_NUMBER_GPAGES) {
				pr_warn("MMU: %lu pages requested for page "
					"size %llu KB, limiting to "
					__stringify(MAX_NUMBER_GPAGES) "\n",
					npages, size / 1024);
				npages = MAX_NUMBER_GPAGES;
			}
			gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
			size = 0;
		}
	}
	return 0;
}
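/*
 * Example of the interleaving this parser relies on (sizes chosen purely
 * for illustration): with "hugepagesz=1G hugepages=2" on the command
 * line, the 1G psize gets gpage_npages = 2; a "hugepages=" that does not
 * directly follow a valid "hugepagesz="/"default_hugepagesz=" is skipped
 * because size is still 0.
 */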


/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the lmb
 * allocate to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
	static __initdata char cmdline[COMMAND_LINE_SIZE];
	phys_addr_t size, base;
	int i;

	strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
			NULL, &do_gpage_early_setup);

	/*
	 * Walk gpage list in reverse, allocating larger page sizes first.
	 * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
	 * When we reach the point in the list where pages are no longer
	 * considered gpages, we're done.
	 */
	for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
		if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
			continue;
		else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
			break;

		size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
		base = memblock_alloc_base(size * gpage_npages[i], size,
					   MEMBLOCK_ALLOC_ANYWHERE);
		add_gpage(base, size, gpage_npages[i]);
	}
}

#else /* !PPC_FSL_BOOK3E */

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif

#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head	rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(hugepte_cache, batch->ptes[i]);

	free_page((unsigned long)batch);
}
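/*
 * Rough capacity check for a batch, assuming 4K pages and 8-byte
 * pointers (both vary by config): HUGEPD_FREELIST_SIZE works out to
 * (4096 - sizeof(struct hugepd_freelist)) / 8, i.e. one page holds the
 * rcu/index header plus roughly 500 hugepte pointers before the batch
 * is handed to call_rcu_sched() below.
 */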

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = this_cpu_ptr(&hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm),
			  cpumask_of(smp_processor_id()))) {
		kmem_cache_free(hugepte_cache, hugepte);
		put_cpu_var(hugepd_freelist_cur);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
	put_cpu_var(hugepd_freelist_cur);
}
#endif

static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
	/* Note: On fsl the hpdp may be the first of several */
	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#else
	unsigned int shift = hugepd_shift(*hpdp);
#endif

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		hpdp->pd = 0;

#ifdef CONFIG_PPC_FSL_BOOK3E
	hugepd_free(tlb, hugepte);
#else
	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#endif
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
			/*
			 * If it is not a hugepd pointer, we should already find
			 * it cleared.
			 */
			WARN_ON(!pmd_none_or_clear_bad(pmd));
			continue;
		}
#ifdef CONFIG_PPC_FSL_BOOK3E
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
#endif
		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't use the optimization
	 * used in the normal page free_pgd_range(), of checking
	 * whether we're actually covering a large enough range to have
	 * to do anything at the top level of the walk instead of at
	 * the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}

/*
 * We are holding mmap_sem, so a parallel huge page collapse cannot run.
 * To prevent hugepage split, disable irq.
 */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep, pte;
	unsigned shift;
	unsigned long mask, flags;
	struct page *page = ERR_PTR(-EINVAL);

	local_irq_save(flags);
	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
	if (!ptep)
		goto no_page;
	pte = READ_ONCE(*ptep);
	/*
	 * Verify it is a huge page else bail.
	 * Transparent hugepages are handled by generic code. We can skip them
	 * here.
	 */
	if (!shift || pmd_trans_huge(__pmd(pte_val(pte))))
		goto no_page;

	if (!pte_present(pte)) {
		page = NULL;
		goto no_page;
	}
	mask = (1UL << shift) - 1;
	page = pte_page(pte);
	if (page)
		page += (address & mask) / PAGE_SIZE;

no_page:
	local_irq_restore(flags);
	return page;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write)
{
	BUG();
	return NULL;
}

static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}
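/*
 * Worked example with made-up numbers: hugepte_addr_end(0x1100000,
 * 0x3000000, 0x1000000) rounds addr up to the next 16M boundary,
 * 0x2000000, which is below end and is therefore returned.
 */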

int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
		unsigned long end, int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

	return 1UL << mmu_psize_to_shift(psize);
#else
	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	return huge_page_size(hstate_vma(vma));
#endif
}

static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) ? false : true;
	return false;
}
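/*
 * For instance, 16 and 64 pass (__ilog2() returns the even values 4 and
 * 6), while 8 fails because __ilog2(8) == 3 is odd.
 */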

static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if ((size < PAGE_SIZE) || !is_power_of_4(size))
		return -EINVAL;
#else
	if (!is_power_of_2(size)
	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
		return -EINVAL;
#endif

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been setup */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0)
		printk(KERN_WARNING "Invalid huge page size specified (%llu)\n", size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
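/*
 * For example, booting with "hugepagesz=16M" on the kernel command line
 * (assuming the MMU supports a 16M page size) reaches
 * add_huge_page_size() via hugepage_setup_sz() and registers the hstate.
 */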

#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		/* Don't treat normal page sizes as huge... */
		if (shift != PAGE_SHIFT)
			if (add_huge_page_size(1ULL << shift) < 0)
				continue;
	}

	/*
	 * Create a kmem cache for hugeptes.  The bottom bits in the pte have
	 * size information encoded in them, so align them to allow this
	 */
	hugepte_cache =  kmem_cache_create("hugepte-cache", sizeof(pte_t),
					   HUGEPD_SHIFT_MASK + 1, 0, NULL);
	if (hugepte_cache == NULL)
		panic("%s: Unable to create kmem cache for hugeptes\n",
		      __func__);

	/* Default hpage size = 4M */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else
		panic("%s: Unable to set default huge page size\n", __func__);


	return 0;
}
#else
static int __init hugetlbpage_init(void)
{
	int psize;

	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;

		if (shift < PMD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PUD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
		/*
		 * if we have pdshift and shift value same, we don't
		 * use pgt cache for hugepd.
		 */
		if (pdshift != shift) {
			pgtable_cache_add(pdshift - shift, NULL);
			if (!PGT_CACHE(pdshift - shift))
				panic("hugetlbpage_init(): could not create "
				      "pgtable cache for %d bit pagesize\n", shift);
		}
	}

	/* Set default large page size. Currently, we pick 16M or 1M
	 * depending on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

	return 0;
}
#endif
arch_initcall(hugetlbpage_init);

void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, bottom two bits != 00
 * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against teardown,
 * and we can follow the address down to the page and take a ref on it.
 * This function needs to be called with interrupts disabled. We use this variant
 * when we have MSR[EE] = 0 but paca->soft_enabled = 1.
 */

pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				   unsigned *shift)
{
	pgd_t pgd, *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	pgdp = pgdir + pgd_index(ea);
	pgd  = READ_ONCE(*pgdp);
	/*
	 * Always operate on the local stack value. This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse,
	 * page fault or page unmap. The returned pte_t * is still not
	 * stable, so the caller must recheck it for the above conditions.
	 */
	if (pgd_none(pgd))
		return NULL;
	else if (pgd_huge(pgd)) {
		ret_pte = (pte_t *) pgdp;
		goto out;
	} else if (is_hugepd(__hugepd(pgd_val(pgd))))
		hpdp = (hugepd_t *)&pgd;
	else {
		/*
		 * Even if we end up with an unmap, the pgtable will not
		 * be freed, because we do an RCU free and here we have
		 * interrupts disabled.
		 */
		pdshift = PUD_SHIFT;
		pudp = pud_offset(&pgd, ea);
		pud  = READ_ONCE(*pudp);

		if (pud_none(pud))
			return NULL;
		else if (pud_huge(pud)) {
			ret_pte = (pte_t *) pudp;
			goto out;
		} else if (is_hugepd(__hugepd(pud_val(pud))))
			hpdp = (hugepd_t *)&pud;
		else {
			pdshift = PMD_SHIFT;
			pmdp = pmd_offset(&pud, ea);
			pmd  = READ_ONCE(*pmdp);
			/*
			 * A hugepage collapse is captured by pmd_none, because
			 * it marks the pmd none and does an hpte invalidate.
			 *
			 * We don't worry about pmd_trans_splitting here; a
			 * caller that needs to handle the splitting case
			 * should check for that.
			 */
			if (pmd_none(pmd))
				return NULL;

			if (pmd_huge(pmd) || pmd_large(pmd)) {
				ret_pte = (pte_t *) pmdp;
				goto out;
			} else if (is_hugepd(__hugepd(pmd_val(pmd))))
				hpdp = (hugepd_t *)&pmd;
			else
				return pte_offset_kernel(&pmd, ea);
		}
	}
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (shift)
		*shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte_or_hugepte);
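/*
 * Minimal caller sketch, mirroring follow_huge_addr() above (variable
 * names are illustrative only):
 *
 *	unsigned shift;
 *	pte_t *ptep, pte;
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	ptep = find_linux_pte_or_hugepte(mm->pgd, ea, &shift);
 *	if (ptep)
 *		pte = READ_ONCE(*ptep);
 *	local_irq_restore(flags);
 *
 * Interrupts must stay disabled for as long as the returned pointer is
 * used, since that is what keeps the RCU-freed page tables alive.
 */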

int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page, *tail;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = READ_ONCE(*ptep);
	mask = _PAGE_PRESENT | _PAGE_USER;
	if (write)
		mask |= _PAGE_RW;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail page need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}