// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
#endif
static unsigned long hugetlb_cma_size __initdata;

/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;

	return true;
}

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/*
	 * If no pages are used and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool.
	 */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool, flags);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}
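
/*
 * Worked example (illustrative): a subpool created with min_hpages = 10
 * starts with rsv_hpages = 10 pages reserved in the global pool.  A call
 * hugepage_subpool_get_pages(spool, 3) consumes 3 of those reserves and
 * returns 0, since the global pool already accounts for them.  A later
 * call for delta = 12 with rsv_hpages = 7 returns 12 - 7 = 5: only 5
 * pages must be newly charged to the global pool.
 */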

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;
	unsigned long flags;

	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	 /* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool, flags);

	return ret;
}
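
/*
 * Example (illustrative): with min_hpages = 10, rsv_hpages = 7 and
 * used_hpages now below min_hpages, putting back delta = 5 pages
 * refills the subpool reserve to its minimum (7 + 5 is clamped to 10)
 * and returns 7 + 5 - 10 = 2, the number of global reservations that
 * can now be dropped.
 */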

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg = NULL;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}

/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region. But this area might be
		 * scattered when there are already some file_regions reside in
		 * it. As a result, many file_regions may share only one css
		 * reference. In order to ensure that one file_region must hold
		 * exactly one h_cg->css reference, we should do css_get for
		 * each file_region and leave the reference held by caller
		 * untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}

static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}

static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg && org &&
	       rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;

#else
	return true;
#endif
}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg = NULL, *prg = NULL;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}

static inline long
hugetlb_resv_map_add(struct resv_map *map, struct file_region *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg->link.prev);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}

/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. And regions_needed will
 * indicate the number of file_regions needed in the cache to add
 * the regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *rg = NULL, *trg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, rg->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(rg, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (rg->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (rg->to > last_accounted_offset)
				last_accounted_offset = rg->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (rg->from >= t)
			break;

		/* Add an entry for last_accounted_offset -> rg->from, and
		 * update last_accounted_offset.
		 */
		if (rg->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, rg,
						    last_accounted_offset,
						    rg->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = rg->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	VM_BUG_ON(add < 0);
	return add;
}

/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	struct list_head allocated_regions;
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	INIT_LIST_HEAD(&allocated_regions);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is greater
 * than or equal to zero.  If file_region entries needed to be allocated for
 * this operation and we were not able to allocate, it returns -ENOMEM.
 * region_add of regions of length 1 never allocate file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation. Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		    resv->adds_in_progress +
			    (actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures is added to the cache as a
 * placeholder, for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater or equal to
 * zero.  -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter. regions_needed
 * is the value returned by the region_chg call, it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}
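
/*
 * Sketch of how region_chg/region_add/region_abort cooperate (a
 * minimal illustration, not copied from a real caller): reservation is
 * split into a prepare phase and a commit-or-abort phase so the map is
 * only modified once resources are guaranteed.
 *
 *	chg = region_chg(resv, f, t, &regions_needed);
 *	if (chg < 0)
 *		return chg;
 *	if (charging the subpool/cgroup for chg pages fails) {
 *		region_abort(resv, f, t, regions_needed);
 *		return -ENOSPC;
 *	}
 *	add = region_add(resv, f, t, regions_needed, h, h_cg);
 */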

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}
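
/*
 * Example (illustrative): with a single region [0, 10) in the map,
 * region_del(resv, 2, 3) must split it into [0, 2) and [3, 10),
 * consuming one spare file_region descriptor for the new [3, 10)
 * entry and returning 1, the number of pages removed.
 */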

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}
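
/*
 * For example (illustrative, 2MB pages so huge_page_shift == 21): a
 * fault at vma->vm_start + 4MB in a mapping with vm_pgoff == 0 yields
 * (4MB >> 21) + 0 = 2, i.e. the third huge page backing the file.
 */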

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
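
/*
 * For example, a MAP_PRIVATE vma that owns its reservation stores
 * (resv_map pointer | HPAGE_RESV_OWNER) in vma->vm_private_data;
 * vma_resv_map() below recovers the pointer by masking off the low
 * HPAGE_RESV_MASK bits.
 */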

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file, this region map represents the backing file
 * pages which have ever had a reservation assigned which this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it, this region map represents those offsets which have consumed
 * reservation ie. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0. On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting. On private mappings, these will be
	 * re-initialized to the proper values, to indicate that hugetlb cgroup
	 * reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.  So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data, but
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process (chg == 0),
		 * so, we should decrement reserved count. Without decrementing,
		 * reserve count remains after releasing inode, because this
		 * allocated page will go into page cache and is regarded as
		 * coming from reserved pool in releasing step.  Currently, we
		 * don't have any other solution to deal with this situation
		 * properly, so add work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);

	lockdep_assert_held(&hugetlb_lock);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	SetHPageFreed(page);
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
	struct page *page;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
		if (pin && !is_pinnable_page(page))
			continue;

		if (PageHWPoison(page))
			continue;

		list_move(&page->lru, &h->hugepage_activelist);
		set_page_refcounted(page);
		ClearHPageFreed(page);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return page;
	}

	return NULL;
}

static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
		nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct page *page;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node. Pool is node rather than
		 * zone aware
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		page = dequeue_huge_page_node_exact(h, node);
		if (page)
			return page;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		SetHPageRestoreReserve(page);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return page;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for remove_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)
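
/*
 * Usage sketch (illustrative): the iterators above visit at most
 * nodes_weight(*mask) nodes, starting from the hstate's saved next
 * node and advancing it on every step:
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		page = alloc_fresh_huge_page(h, gfp_mask, node, ...);
 *		if (page)
 *			break;
 *	}
 */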

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_page(struct page *page,
					unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	atomic_set(compound_mapcount_ptr(page), 0);
	atomic_set(compound_pincount_ptr(page), 0);

	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		clear_compound_head(p);
		set_page_refcounted(p);
	}

	set_compound_order(page, 0);
	page[1].compound_nr = 0;
	__ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
	/*
	 * If the page isn't allocated using the cma allocator,
	 * cma_release() returns false.
	 */
#ifdef CONFIG_CMA
	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
		return;
#endif

	free_contig_range(page_to_pfn(page), 1 << order);
}

#ifdef CONFIG_CONTIG_ALLOC
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	unsigned long nr_pages = pages_per_huge_page(h);
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

#ifdef CONFIG_CMA
	{
		struct page *page;
		int node;

		if (hugetlb_cma[nid]) {
			page = cma_alloc(hugetlb_cma[nid], nr_pages,
					huge_page_order(h), true);
			if (page)
				return page;
		}

		if (!(gfp_mask & __GFP_THISNODE)) {
			for_each_node_mask(node, *nodemask) {
				if (node == nid || !hugetlb_cma[node])
					continue;

				page = cma_alloc(hugetlb_cma[node], nr_pages,
						huge_page_order(h), true);
				if (page)
					return page;
			}
		}
	}
#endif

	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned int order);
#else /* !CONFIG_CONTIG_ALLOC */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned int order) { }
#endif

/*
 * Remove hugetlb page from lists, and update dtor so that page appears
 * as just a compound page.  A reference is held on the page.
 *
 * Must be called with hugetlb lock held.
 */
static void remove_hugetlb_page(struct hstate *h, struct page *page,
							bool adjust_surplus)
{
	int nid = page_to_nid(page);

	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	list_del(&page->lru);

	if (HPageFreed(page)) {
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	set_page_refcounted(page);
	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;
	struct page *subpage = page;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	for (i = 0; i < pages_per_huge_page(h);
	     i++, subpage = mem_map_next(subpage, page, i)) {
		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}
	if (hstate_is_gigantic(h)) {
		destroy_compound_gigantic_page(page, huge_page_order(h));
		free_gigantic_page(page, huge_page_order(h));
	} else {
		__free_pages(page, huge_page_order(h));
	}
}

static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
{
	struct page *page, *t_page;

	list_for_each_entry_safe(page, t_page, list, lru) {
		update_and_free_page(h, page);
		cond_resched();
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool = hugetlb_page_subpool(page);
	bool restore_reserve;
	unsigned long flags;

	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(page_mapcount(page), page);

	hugetlb_set_page_subpool(page, NULL);
	page->mapping = NULL;
	restore_reserve = HPageRestoreReserve(page);
	ClearHPageRestoreReserve(page);

	/*
	 * If HPageRestoreReserve was set on page, page allocation consumed a
	 * reservation.  If the page was associated with a subpool, there
	 * would have been a page reserved in the subpool before allocation
	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
	 * reservation, do not call hugepage_subpool_put_pages() as this will
	 * remove the reserved page from the subpool.
	 */
	if (!restore_reserve) {
		/*
		 * A return code of zero implies that the subpool will be
		 * under its minimum size if the reservation is not restored
		 * after page is free.  Therefore, force restore_reserve
		 * operation.
		 */
		if (hugepage_subpool_put_pages(spool, 1) == 0)
			restore_reserve = true;
	}

	spin_lock_irqsave(&hugetlb_lock, flags);
	ClearHPageMigratable(page);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
					  pages_per_huge_page(h), page);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (HPageTemporary(page)) {
		remove_hugetlb_page(h, page, false);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
		update_and_free_page(h, page);
	} else if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		remove_hugetlb_page(h, page, true);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
		update_and_free_page(h, page);
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
	}
}

/*
 * Must be called with the hugetlb lock held
 */
static void __prep_account_new_huge_page(struct hstate *h, int nid)
{
	lockdep_assert_held(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
}

static void __prep_new_huge_page(struct page *page)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	hugetlb_set_page_subpool(page, NULL);
	set_hugetlb_cgroup(page, NULL);
	set_hugetlb_cgroup_rsvd(page, NULL);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	__prep_new_huge_page(page);
	spin_lock_irq(&hugetlb_lock);
	__prep_account_new_huge_page(h, nid);
	spin_unlock_irq(&hugetlb_lock);
}

static void prep_compound_gigantic_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__ClearPageReserved(page);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too.  Otherwise drivers using get_user_pages() to access tail
		 * pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set).  Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need know if put_page() is needed
		 * after get_user_pages().
		 */
		__ClearPageReserved(p);
		set_page_count(p, 0);
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
	atomic_set(compound_pincount_ptr(page), 0);
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;

	return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
}

/*
 * Find and lock address space (mapping) in write mode.
 *
 * Upon entry, the page is locked which means that page_mapping() is
 * stable.  Due to locking order, we can only trylock_write.  If we
 * cannot get the lock, simply return NULL to the caller.
 */
struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
{
	struct address_space *mapping = page_mapping(hpage);

	if (!mapping)
		return mapping;

	if (i_mmap_trylock_write(mapping))
		return mapping;

	return NULL;
}

pgoff_t __basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (!PageHuge(page_head))
		return page_index(page);

	if (compound_order(page_head) >= MAX_ORDER)
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}
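
/*
 * Example (illustrative): for a 2MB compound page (order 9) at page
 * cache index 3, the base page at head + 5 has base-page index
 * (3 << 9) + 5 = 1541.
 */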

static struct page *alloc_buddy_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	int order = huge_page_order(h);
	struct page *page;
	bool alloc_try_hard = true;

	/*
	 * By default we always try hard to allocate the page with
	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
	 * a loop (to adjust global huge page counts) and previous allocation
	 * failed, do not continue to try hard on the same node.  Use the
	 * node_alloc_noretry bitmap to manage this state information.
	 */
	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
		alloc_try_hard = false;
	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
	if (alloc_try_hard)
		gfp_mask |= __GFP_RETRY_MAYFAIL;
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();
	page = __alloc_pages(gfp_mask, order, nid, nmask);
	if (page)
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	/*
	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this
	 * indicates an overall state change.  Clear bit so that we resume
	 * normal 'try hard' allocations.
	 */
	if (node_alloc_noretry && page && !alloc_try_hard)
		node_clear(nid, *node_alloc_noretry);

	/*
	 * If we tried hard to get a page but failed, set bit so that
	 * subsequent attempts will not try as hard until there is an
	 * overall state change.
	 */
	if (node_alloc_noretry && !page && alloc_try_hard)
		node_set(nid, *node_alloc_noretry);

	return page;
}

/*
 * Common helper to allocate a fresh hugetlb page. All specific allocators
 * should use this function to get new hugetlb pages
 */
static struct page *alloc_fresh_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
	else
		page = alloc_buddy_huge_page(h, gfp_mask,
				nid, nmask, node_alloc_noretry);
	if (!page)
		return NULL;

	if (hstate_is_gigantic(h))
		prep_compound_gigantic_page(page, huge_page_order(h));
	prep_new_huge_page(h, page, page_to_nid(page));

	return page;
}

/*
 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
 * manner.
 */
static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
				nodemask_t *node_alloc_noretry)
{
	struct page *page;
	int nr_nodes, node;
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
						node_alloc_noretry);
		if (page)
			break;
	}

	if (!page)
		return 0;

	put_page(page); /* free it into the hugepage allocator */

	return 1;
}

/*
 * Remove huge page from pool from next node to free.  Attempt to keep
 * persistent huge pages more or less balanced over allowed nodes.
 * This routine only 'removes' the hugetlb page.  The caller must make
 * an additional call to free the page to low level allocators.
 * Called with hugetlb_lock locked.
 */
static struct page *remove_pool_huge_page(struct hstate *h,
						nodemask_t *nodes_allowed,
						 bool acct_surplus)
{
	int nr_nodes, node;
	struct page *page = NULL;
1719

1720
	lockdep_assert_held(&hugetlb_lock);
1721
	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1722 1723 1724 1725
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
1726 1727
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
1728
			page = list_entry(h->hugepage_freelists[node].next,
1729
					  struct page, lru);
1730
			remove_hugetlb_page(h, page, acct_surplus);
1731
			break;
1732
		}
1733
	}
1734

1735
	return page;
1736 1737
}

/*
 * Dissolve a given free hugepage into free buddy pages. This function does
 * nothing for in-use hugepages and non-hugepages.
 * This function returns values like below:
 *
 *  -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
 *          (allocated or reserved.)
 *       0: successfully dissolved free hugepages or the page is not a
 *          hugepage (considered as already dissolved)
 */
int dissolve_free_huge_page(struct page *page)
{
	int rc = -EBUSY;

retry:
	/* Not to disrupt the normal path by needlessly holding hugetlb_lock */
	if (!PageHuge(page))
		return 0;

	spin_lock_irq(&hugetlb_lock);
	if (!PageHuge(page)) {
		rc = 0;
		goto out;
	}

	if (!page_count(page)) {
		struct page *head = compound_head(page);
		struct hstate *h = page_hstate(head);
		if (h->free_huge_pages - h->resv_huge_pages == 0)
			goto out;

		/*
		 * We should make sure that the page is already on the free
		 * list when it is dissolved.
		 */
		if (unlikely(!HPageFreed(head))) {
			spin_unlock_irq(&hugetlb_lock);
			cond_resched();

			/*
			 * Theoretically, we should return -EBUSY when we
			 * encounter this race.  In fact, we have a chance to
			 * successfully dissolve the page if we retry, because
			 * the race window is quite small.  Seizing this
			 * opportunity increases the success rate of
			 * dissolving the page.
			 */
			goto retry;
		}

		/*
		 * Move the PageHWPoison flag from the head page to the raw
		 * error page, which makes the subpages other than the error
		 * page reusable.
		 */
		if (PageHWPoison(head) && page != head) {
			SetPageHWPoison(page);
			ClearPageHWPoison(head);
		}
		remove_hugetlb_page(h, head, false);
		h->max_huge_pages--;
		spin_unlock_irq(&hugetlb_lock);
		update_and_free_page(h, head);
		return 0;
	}
out:
	spin_unlock_irq(&hugetlb_lock);
	return rc;
}

/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that were dissolved before that error are lost.
 */
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int rc = 0;

	if (!hugepages_supported())
		return rc;

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
		page = pfn_to_page(pfn);
		rc = dissolve_free_huge_page(page);
		if (rc)
			break;
	}

	return rc;
}
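
/*
 * Example (illustrative): offlining a memory block that overlaps any part
 * of a free 1GB gigantic page dissolves the whole 1GB page, per the note
 * above.  Stepping by 1 << minimum_order guarantees that no free hugepage
 * of any configured size is skipped in the scan.
 */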

/*
 * Allocates a fresh surplus page from the page allocator.
 */
static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nmask)
{
	struct page *page = NULL;

	if (hstate_is_gigantic(h))
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock_irq(&hugetlb_lock);

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
	if (!page)
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * We could have raced with the pool size change.
	 * Double check that and simply deallocate the new page
	 * if we would end up overcommitting the surpluses.  Abuse
	 * the temporary page to work around the nasty free_huge_page
	 * codeflow.
	 */
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		SetHPageTemporary(page);
		spin_unlock_irq(&hugetlb_lock);
		put_page(page);
		return NULL;
	} else {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[page_to_nid(page)]++;
	}

out_unlock:
	spin_unlock_irq(&hugetlb_lock);

	return page;
}

static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		return NULL;

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
	if (!page)
		return NULL;

	/*
	 * We do not account these pages as surplus because they are only
	 * temporary and will be released properly on the last reference
	 */
	SetHPageTemporary(page);

	return page;
}

/*
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
 */
static
struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask = htlb_alloc_mask(h);
	int nid;
	nodemask_t *nodemask;

	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
	page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
	mpol_cond_put(mpol);

	return page;
}

/* page migration callback function */
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
		nodemask_t *nmask, gfp_t gfp_mask)
{
	spin_lock_irq(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0) {
		struct page *page;

		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
		if (page) {
			spin_unlock_irq(&hugetlb_lock);
			return page;
		}
	}
	spin_unlock_irq(&hugetlb_lock);

	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
}

/* mempolicy aware migration callback */
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
		unsigned long address)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct page *page;
	gfp_t gfp_mask;
	int node;

	gfp_mask = htlb_alloc_mask(h);
	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
	mpol_cond_put(mpol);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, long delta)
	__must_hold(&hugetlb_lock)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret;
	long i;
	long needed, allocated;
	bool alloc_ok = true;

	lockdep_assert_held(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock_irq(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
				NUMA_NO_NODE, NULL);
		if (!page) {
			alloc_ok = false;
			break;
		}
		list_add(&page->lru, &surplus_list);
		cond_resched();
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock_irq(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		int zeroed;

		if ((--needed) < 0)
			break;
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		zeroed = put_page_testzero(page);
		VM_BUG_ON_PAGE(!zeroed, page);
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock_irq(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
		put_page(page);
	spin_lock_irq(&hugetlb_lock);

	return ret;
}
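
/*
 * Worked example (illustrative): with resv_huge_pages = 10,
 * free_huge_pages = 12 and delta = 5, needed = (10 + 5) - 12 = 3, so three
 * surplus pages are requested from the buddy allocator.  If a racing free
 * raises free_huge_pages meanwhile, the recalculation after retaking the
 * lock sends the now-unneeded extras straight back.
 */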

/*
 * This routine has two main purposes:
 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
 *    in unused_resv_pages.  This corresponds to the prior adjustments made
 *    to the associated reservation map.
 * 2) Free any unused surplus pages that may have been allocated to satisfy
 *    the reservation.  As many as unused_resv_pages may be freed.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;
	struct page *page;
	LIST_HEAD(page_list);

	lockdep_assert_held(&hugetlb_lock);
	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (hstate_is_gigantic(h))
		goto out;

	/*
	 * Part (or even all) of the reservation could have been backed
	 * by pre-allocated pages. Only free surplus pages.
	 */
	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * remove_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 */
	while (nr_pages--) {
		page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
		if (!page)
			goto out;

		list_add(&page->lru, &page_list);
	}

out:
	spin_unlock_irq(&hugetlb_lock);
	update_and_free_pages_bulk(h, &page_list);
	spin_lock_irq(&hugetlb_lock);
}


/*
 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
 * are used by the huge page allocation routines to manage reservations.
 *
 * vma_needs_reservation is called to determine if the huge page at addr
 * within the vma has an associated reservation.  If a reservation is
 * needed, the value 1 is returned.  The caller is then responsible for
 * managing the global reservation and subpool usage counts.  After
 * the huge page has been allocated, vma_commit_reservation is called
 * to add the page to the reservation map.  If the page allocation fails,
 * the reservation must be ended instead of committed.  vma_end_reservation
 * is called in such cases.
 *
 * In the normal case, vma_commit_reservation returns the same value
 * as the preceding vma_needs_reservation call.  The only time this
 * is not the case is if a reserve map was changed between calls.  It
 * is the responsibility of the caller to notice the difference and
 * take appropriate action.
 *
 * vma_add_reservation is used in error paths where a reservation must
 * be restored when a newly allocated huge page must be freed.  It is
 * to be called after calling vma_needs_reservation to determine if a
 * reservation exists.
 */
enum vma_resv_mode {
	VMA_NEEDS_RESV,
	VMA_COMMIT_RESV,
	VMA_END_RESV,
	VMA_ADD_RESV,
};
static long __vma_reservation_common(struct hstate *h,
				struct vm_area_struct *vma, unsigned long addr,
				enum vma_resv_mode mode)
{
	struct resv_map *resv;
	pgoff_t idx;
	long ret;
	long dummy_out_regions_needed;

	resv = vma_resv_map(vma);
	if (!resv)
		return 1;

	idx = vma_hugecache_offset(h, vma, addr);
	switch (mode) {
	case VMA_NEEDS_RESV:
		ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
		/* We assume that vma_reservation_* routines always operate on
		 * 1 page, and that adding to resv map a 1 page entry can only
		 * ever require 1 region.
		 */
		VM_BUG_ON(dummy_out_regions_needed != 1);
		break;
	case VMA_COMMIT_RESV:
		ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
		/* region_add calls of range 1 should never fail. */
		VM_BUG_ON(ret < 0);
		break;
	case VMA_END_RESV:
		region_abort(resv, idx, idx + 1, 1);
		ret = 0;
		break;
	case VMA_ADD_RESV:
		if (vma->vm_flags & VM_MAYSHARE) {
			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
			/* region_add calls of range 1 should never fail. */
			VM_BUG_ON(ret < 0);
		} else {
			region_abort(resv, idx, idx + 1, 1);
			ret = region_del(resv, idx, idx + 1);
		}
		break;
	default:
		BUG();
	}

	if (vma->vm_flags & VM_MAYSHARE)
		return ret;
	/*
	 * We know private mapping must have HPAGE_RESV_OWNER set.
	 *
	 * In most cases, reserves always exist for private mappings.
	 * However, a file associated with mapping could have been
	 * hole punched or truncated after reserves were consumed.
	 * A subsequent fault on such a range will not use reserves.
	 * Subtle - The reserve map for private mappings has the
	 * opposite meaning than that of shared mappings.  If NO
	 * entry is in the reserve map, it means a reservation exists.
	 * If an entry exists in the reserve map, it means the
	 * reservation has already been consumed.  As a result, the
	 * return value of this routine is the opposite of the
	 * value returned from reserve map manipulation routines above.
	 */
	if (ret > 0)
		return 0;
	if (ret == 0)
		return 1;
	return ret;
}

static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
}

static long vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
}

static void vma_end_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
}

static long vma_add_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
}
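
/*
 * Illustrative sketch only (never called; not part of the original source):
 * the canonical ordering of the reservation calls documented above.
 * alloc_one_page_hypothetical() is a made-up stand-in for whatever actually
 * produces the huge page.
 */
static __maybe_unused void vma_reservation_example(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr,
		struct page *(*alloc_one_page_hypothetical)(void))
{
	struct page *page;

	if (vma_needs_reservation(h, vma, addr) < 0)
		return;				/* reserve map allocation failed */

	page = alloc_one_page_hypothetical();
	if (page)
		vma_commit_reservation(h, vma, addr);	/* record the page */
	else
		vma_end_reservation(h, vma, addr);	/* abort, don't commit */
}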

/*
 * This routine is called to restore a reservation on error paths.  In the
 * specific error paths, a huge page was allocated (via alloc_huge_page)
 * and is about to be freed.  If a reservation for the page existed,
 * alloc_huge_page would have consumed the reservation and set
 * HPageRestoreReserve in the newly allocated page.  When the page is freed
 * via free_huge_page, the global reservation count will be incremented if
 * HPageRestoreReserve is set.  However, free_huge_page cannot adjust the
 * reserve map.  Adjust the reserve map here to be consistent with global
 * reserve count adjustments to be made by free_huge_page.
 */
static void restore_reserve_on_error(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address,
			struct page *page)
{
	if (unlikely(HPageRestoreReserve(page))) {
		long rc = vma_needs_reservation(h, vma, address);

		if (unlikely(rc < 0)) {
			/*
			 * Rare out of memory condition in reserve map
			 * manipulation.  Clear HPageRestoreReserve so that
			 * the global reserve count will not be incremented
			 * by free_huge_page.  This will make it appear
			 * as though the reservation for this page was
			 * consumed.  This may prevent the task from
			 * faulting in the page at a later time.  This
			 * is better than inconsistent global huge page
			 * accounting of reserve counts.
			 */
			ClearHPageRestoreReserve(page);
		} else if (rc) {
			rc = vma_add_reservation(h, vma, address);
			if (unlikely(rc < 0))
				/*
				 * See above comment about rare out of
				 * memory condition.
				 */
				ClearHPageRestoreReserve(page);
		} else
			vma_end_reservation(h, vma, address);
	}
}

/*
 * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one
 * @h: struct hstate old page belongs to
 * @old_page: Old page to dissolve
 * @list: List to isolate the page in case we need to
 * Returns 0 on success, otherwise a negated error.
 */
static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
					struct list_head *list)
{
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
	int nid = page_to_nid(old_page);
	struct page *new_page;
	int ret = 0;

	/*
	 * Before dissolving the page, we need to allocate a new one for the
	 * pool to remain stable.  Using alloc_buddy_huge_page() allows us to
	 * avoid dealing with prep_new_huge_page() and any counters.  This
	 * simplifies things and lets us do the whole thing under the lock.
	 */
	new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
	if (!new_page)
		return -ENOMEM;

retry:
	spin_lock_irq(&hugetlb_lock);
	if (!PageHuge(old_page)) {
		/*
		 * Freed from under us. Drop new_page too.
		 */
		goto free_new;
	} else if (page_count(old_page)) {
		/*
		 * Someone has grabbed the page, try to isolate it here.
		 * Fail with -EBUSY if not possible.
		 */
		spin_unlock_irq(&hugetlb_lock);
		if (!isolate_huge_page(old_page, list))
			ret = -EBUSY;
		spin_lock_irq(&hugetlb_lock);
		goto free_new;
	} else if (!HPageFreed(old_page)) {
		/*
		 * Page's refcount is 0 but it has not been enqueued in the
		 * freelist yet. Race window is small, so we can succeed here if
		 * we retry.
		 */
		spin_unlock_irq(&hugetlb_lock);
		cond_resched();
		goto retry;
	} else {
		/*
		 * Ok, old_page is still a genuine free hugepage. Remove it from
		 * the freelist and decrease the counters. These will be
		 * incremented again when calling __prep_account_new_huge_page()
		 * and enqueue_huge_page() for new_page. The counters will remain
		 * stable since this happens under the lock.
		 */
		remove_hugetlb_page(h, old_page, false);

		/*
		 * new_page needs to be initialized with the standard hugetlb
		 * state. This is normally done by prep_new_huge_page() but
		 * that takes hugetlb_lock which is already held so we need to
		 * open code it here.
		 * Reference count trick is needed because allocator gives us
		 * referenced page but the pool requires pages with 0 refcount.
		 */
		__prep_new_huge_page(new_page);
		__prep_account_new_huge_page(h, nid);
		page_ref_dec(new_page);
		enqueue_huge_page(h, new_page);

		/*
		 * Pages have been replaced, we can safely free the old one.
		 */
		spin_unlock_irq(&hugetlb_lock);
		update_and_free_page(h, old_page);
	}

	return ret;

free_new:
	spin_unlock_irq(&hugetlb_lock);
	__free_pages(new_page, huge_page_order(h));

	return ret;
}

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
{
	struct hstate *h;
	struct page *head;
	int ret = -EBUSY;

	/*
	 * The page might have been dissolved from under our feet, so make sure
	 * to carefully check the state under the lock.
	 * Return success when racing as if we dissolved the page ourselves.
	 */
	spin_lock_irq(&hugetlb_lock);
	if (PageHuge(page)) {
		head = compound_head(page);
		h = page_hstate(head);
	} else {
		spin_unlock_irq(&hugetlb_lock);
		return 0;
	}
	spin_unlock_irq(&hugetlb_lock);

	/*
	 * Fence off gigantic pages as there is a cyclic dependency between
	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
	 * of bailing out right away without further retrying.
	 */
	if (hstate_is_gigantic(h))
		return -ENOMEM;

	if (page_count(head) && isolate_huge_page(head, list))
		ret = 0;
	else if (!page_count(head))
		ret = alloc_and_dissolve_huge_page(h, head, list);

	return ret;
}
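
/*
 * Usage note (illustrative): this entry point serves callers such as the
 * alloc_contig_range() path mentioned in the fencing comment above, which
 * must either migrate an in-use hugetlb page or replace a free one before
 * the underlying page range can be claimed.
 */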

struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	long map_chg, map_commit;
	long gbl_chg;
	int ret, idx;
	struct hugetlb_cgroup *h_cg;
	bool deferred_reserve;

	idx = hstate_index(h);
	/*
	 * Examine the region/reserve map to determine if the process
	 * has a reservation for the page to be allocated.  A return
	 * code of zero indicates a reservation exists (no change).
	 */
	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
	if (map_chg < 0)
		return ERR_PTR(-ENOMEM);

	/*
	 * Processes that did not create the mapping will have no
	 * reserves as indicated by the region/reserve map. Check
	 * that the allocation will not exceed the subpool limit.
	 * Allocations for MAP_NORESERVE mappings also need to be
	 * checked against any subpool limit.
	 */
	if (map_chg || avoid_reserve) {
		gbl_chg = hugepage_subpool_get_pages(spool, 1);
		if (gbl_chg < 0) {
			vma_end_reservation(h, vma, addr);
			return ERR_PTR(-ENOSPC);
		}

		/*
		 * Even though there was no reservation in the region/reserve
		 * map, there could be reservations associated with the
		 * subpool that can be used.  This would be indicated if the
		 * return value of hugepage_subpool_get_pages() is zero.
		 * However, if avoid_reserve is specified we still avoid even
		 * the subpool reservations.
		 */
		if (avoid_reserve)
			gbl_chg = 1;
	}

	/* If this allocation is not consuming a reservation, charge it now.
	 */
	deferred_reserve = map_chg || avoid_reserve;
	if (deferred_reserve) {
		ret = hugetlb_cgroup_charge_cgroup_rsvd(
			idx, pages_per_huge_page(h), &h_cg);
		if (ret)
			goto out_subpool_put;
	}

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret)
		goto out_uncharge_cgroup_reservation;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * gbl_chg is passed to indicate whether or not a page must be taken
	 * from the global free pool (global change).  gbl_chg == 0 indicates
	 * a reservation exists for the allocation.
	 */
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
	if (!page) {
		spin_unlock_irq(&hugetlb_lock);
		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
		if (!page)
			goto out_uncharge_cgroup;
		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
			SetHPageRestoreReserve(page);
			h->resv_huge_pages--;
		}
		spin_lock_irq(&hugetlb_lock);
		list_add(&page->lru, &h->hugepage_activelist);
		/* Fall through */
	}
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
	/* If allocation is not consuming a reservation, also store the
	 * hugetlb_cgroup pointer on the page.
	 */
	if (deferred_reserve) {
		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
						  h_cg, page);
	}

	spin_unlock_irq(&hugetlb_lock);

	hugetlb_set_page_subpool(page, spool);

	map_commit = vma_commit_reservation(h, vma, addr);
	if (unlikely(map_chg > map_commit)) {
		/*
		 * The page was added to the reservation map between
		 * vma_needs_reservation and vma_commit_reservation.
		 * This indicates a race with hugetlb_reserve_pages.
		 * Adjust for the subpool count incremented above AND
		 * in hugetlb_reserve_pages for the same page.  Also,
		 * the reservation count added in hugetlb_reserve_pages
		 * no longer applies.
		 */
		long rsv_adjust;

		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
		hugetlb_acct_memory(h, -rsv_adjust);
		if (deferred_reserve)
			hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
					pages_per_huge_page(h), page);
	}
	return page;

out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
out_uncharge_cgroup_reservation:
	if (deferred_reserve)
		hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
						    h_cg);
out_subpool_put:
	if (map_chg || avoid_reserve)
		hugepage_subpool_put_pages(spool, 1);
	vma_end_reservation(h, vma, addr);
	return ERR_PTR(-ENOSPC);
}
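
/*
 * Illustrative summary of the charging logic above: map_chg reflects the
 * reserve map (0 means a reservation exists) and gbl_chg the subpool
 * (0 means the page need not come from the global free pool).  For
 * instance, a fault on a MAP_NORESERVE mapping has map_chg == 1, so the
 * subpool is charged and, when gbl_chg == 1, the page is also debited
 * from the global free pool.
 */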

int alloc_bootmem_huge_page(struct hstate *h)
	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
int __alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes, node;

	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
		void *addr;

		addr = memblock_alloc_try_nid_raw(
				huge_page_size(h), huge_page_size(h),
				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			goto found;
		}
	}
	return 0;

found:
	BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
	/* Put them into a private list first because mem_map is not up yet */
	INIT_LIST_HEAD(&m->list);
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

static void __init prep_compound_huge_page(struct page *page,
		unsigned int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
		struct hstate *h = m->hstate;

		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, huge_page_order(h));
		WARN_ON(PageReserved(page));
		prep_new_huge_page(h, page, page_to_nid(page));
		put_page(page); /* free it into the hugepage allocator */

		/*
		 * If we had gigantic hugepages allocated at boot time, we need
		 * to restore the 'stolen' pages to totalram_pages in order to
		 * fix confusing memory reports from free(1) and other side
		 * effects, like CommitLimit going negative.
		 */
		if (hstate_is_gigantic(h))
			adjust_managed_page_count(page, pages_per_huge_page(h));
		cond_resched();
	}
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;
	nodemask_t *node_alloc_noretry;

	if (!hstate_is_gigantic(h)) {
		/*
		 * Bit mask controlling how hard we retry per-node allocations.
		 * Ignore errors as lower level routines can deal with
		 * node_alloc_noretry == NULL.  If this kmalloc fails at boot
		 * time, we are likely in bigger trouble.
		 */
		node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
						GFP_KERNEL);
	} else {
		/* allocations done at boot time */
		node_alloc_noretry = NULL;
	}

	/* bit mask controlling how hard we retry per-node allocations */
	if (node_alloc_noretry)
		nodes_clear(*node_alloc_noretry);

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (hstate_is_gigantic(h)) {
			if (hugetlb_cma_size) {
				pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
				goto free;
			}
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_pool_huge_page(h,
					 &node_states[N_MEMORY],
					 node_alloc_noretry))
			break;
		cond_resched();
	}
	if (i < h->max_huge_pages) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
			h->max_huge_pages, buf, i);
		h->max_huge_pages = i;
	}
free:
	kfree(node_alloc_noretry);
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (minimum_order > huge_page_order(h))
			minimum_order = huge_page_order(h);

		/* oversize hugepages were init'ed in early boot */
		if (!hstate_is_gigantic(h))
			hugetlb_hstate_alloc_pages(h);
	}
	VM_BUG_ON(minimum_order == UINT_MAX);
}

static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
			buf, h->free_huge_pages);
	}
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;
	LIST_HEAD(page_list);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h))
		return;

	/*
	 * Collect pages to be freed on a list, and free after dropping lock
	 */
	for_each_node_mask(i, *nodes_allowed) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				goto out;
			if (PageHighMem(page))
				continue;
			remove_hugetlb_page(h, page, false);
			list_add(&page->lru, &page_list);
		}
	}

out:
	spin_unlock_irq(&hugetlb_lock);
	update_and_free_pages_bulk(h, &page_list);
	spin_lock_irq(&hugetlb_lock);
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
}
#endif

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{
	int nr_nodes, node;

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON(delta != -1 && delta != 1);

	if (delta < 0) {
		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node])
				goto found;
		}
	} else {
		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node] <
					h->nr_huge_pages_node[node])
				goto found;
		}
	}
	return 0;

found:
	h->surplus_huge_pages += delta;
	h->surplus_huge_pages_node[node] += delta;
	return 1;
}

#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
			      nodemask_t *nodes_allowed)
{
	unsigned long min_count, ret;
	struct page *page;
	LIST_HEAD(page_list);
	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);

	/*
	 * Bit mask controlling how hard we retry per-node allocations.
	 * If we can not allocate the bit mask, do not attempt to allocate
	 * the requested huge pages.
	 */
	if (node_alloc_noretry)
		nodes_clear(*node_alloc_noretry);
	else
		return -ENOMEM;

	/*
	 * resize_lock mutex prevents concurrent adjustments to number of
	 * pages in hstate via the proc/sysfs interfaces.
	 */
	mutex_lock(&h->resize_lock);
	spin_lock_irq(&hugetlb_lock);

	/*
	 * Check for a node specific request.
	 * Changing node specific huge page count may require a corresponding
	 * change to the global count.  In any case, the passed node mask
	 * (nodes_allowed) will restrict alloc/free to the specified node.
	 */
	if (nid != NUMA_NO_NODE) {
		unsigned long old_count = count;

		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
		/*
		 * User may have specified a large count value which caused the
		 * above calculation to overflow.  In this case, they wanted
		 * to allocate as many huge pages as possible.  Set count to
		 * largest possible value to align with their intention.
		 */
		if (count < old_count)
			count = ULONG_MAX;
	}
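
	/*
	 * Worked example of the adjustment above (illustrative): with 100
	 * global pages of which 30 sit on node 1, writing 50 to node 1's
	 * nr_hugepages yields count = 50 + (100 - 30) = 120, a global
	 * target that changes only node 1's share.
	 */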

	/*
	 * Gigantic pages runtime allocation depends on the capability for
	 * large page range allocation.
	 * If the system does not provide this feature, return an error when
	 * the user tries to allocate gigantic pages but let the user free the
	 * boottime allocated gigantic pages.
	 */
	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
		if (count > persistent_huge_pages(h)) {
			spin_unlock_irq(&hugetlb_lock);
			mutex_unlock(&h->resize_lock);
			NODEMASK_FREE(node_alloc_noretry);
			return -EINVAL;
		}
		/* Fall through to decrease pool */
	}

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_surplus_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock_irq(&hugetlb_lock);

		/* yield cpu to avoid soft lockup */
		cond_resched();

		ret = alloc_pool_huge_page(h, nodes_allowed,
						node_alloc_noretry);
		spin_lock_irq(&hugetlb_lock);
		if (!ret)
			goto out;

		/* Bail for signals. Probably ctrl-c from user */
		if (signal_pending(current))
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_surplus_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count, nodes_allowed);

	/*
	 * Collect pages to be removed on list without dropping lock
	 */
	while (min_count < persistent_huge_pages(h)) {
		page = remove_pool_huge_page(h, nodes_allowed, 0);
		if (!page)
			break;

		list_add(&page->lru, &page_list);
	}
	/* free the pages after dropping lock */
	spin_unlock_irq(&hugetlb_lock);
	update_and_free_pages_bulk(h, &page_list);
	spin_lock_irq(&hugetlb_lock);

	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, 1))
			break;
	}
out:
	h->max_huge_pages = persistent_huge_pages(h);
	spin_unlock_irq(&hugetlb_lock);
	mutex_unlock(&h->resize_lock);

	NODEMASK_FREE(node_alloc_noretry);

	return 0;
}

#define HSTATE_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);

static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
	int i;

	for (i = 0; i < HUGE_MAX_HSTATE; i++)
		if (hstate_kobjs[i] == kobj) {
			if (nidp)
				*nidp = NUMA_NO_NODE;
			return &hstates[i];
		}

	return kobj_to_node_hstate(kobj, nidp);
}

static ssize_t nr_hugepages_show_common(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long nr_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		nr_huge_pages = h->nr_huge_pages;
	else
		nr_huge_pages = h->nr_huge_pages_node[nid];

	return sysfs_emit(buf, "%lu\n", nr_huge_pages);
}

static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
					   struct hstate *h, int nid,
					   unsigned long count, size_t len)
{
	int err;
	nodemask_t nodes_allowed, *n_mask;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return -EINVAL;

	if (nid == NUMA_NO_NODE) {
		/*
		 * global hstate attribute
		 */
		if (!(obey_mempolicy &&
				init_nodemask_of_mempolicy(&nodes_allowed)))
			n_mask = &node_states[N_MEMORY];
		else
			n_mask = &nodes_allowed;
	} else {
		/*
		 * Node specific request.  count adjustment happens in
		 * set_max_huge_pages() after acquiring hugetlb_lock.
		 */
		init_nodemask_of_node(&nodes_allowed, nid);
		n_mask = &nodes_allowed;
	}

	err = set_max_huge_pages(h, count, nid, n_mask);

	return err ? err : len;
}

static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
					 struct kobject *kobj, const char *buf,
					 size_t len)
{
	struct hstate *h;
	unsigned long count;
	int nid;
	int err;

	err = kstrtoul(buf, 10, &count);
	if (err)
		return err;

	h = kobj_to_hstate(kobj, &nid);
	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
}

static ssize_t nr_hugepages_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(false, kobj, buf, len);
}
HSTATE_ATTR(nr_hugepages);
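
/*
 * Example (illustrative): the handlers above back sysfs writes such as
 *
 *   echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 * which reach set_max_huge_pages() via __nr_hugepages_store_common().
 */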

#ifdef CONFIG_NUMA

/*
 * hstate attribute for optionally mempolicy-based constraint on persistent
 * huge page alloc/free.
 */
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(true, kobj, buf, len);
}
HSTATE_ATTR(nr_hugepages_mempolicy);
#endif


static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
}

static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj, NULL);

	if (hstate_is_gigantic(h))
		return -EINVAL;

	err = kstrtoul(buf, 10, &input);
	if (err)
		return err;

	spin_lock_irq(&hugetlb_lock);
	h->nr_overcommit_huge_pages = input;
	spin_unlock_irq(&hugetlb_lock);

	return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);

static ssize_t free_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long free_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		free_huge_pages = h->free_huge_pages;
	else
		free_huge_pages = h->free_huge_pages_node[nid];

	return sysfs_emit(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);

static ssize_t resv_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);

static ssize_t surplus_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long surplus_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		surplus_huge_pages = h->surplus_huge_pages;
	else
		surplus_huge_pages = h->surplus_huge_pages_node[nid];

	return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);

static struct attribute *hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&nr_overcommit_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&resv_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
#ifdef CONFIG_NUMA
	&nr_hugepages_mempolicy_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hstate_attr_group = {
	.attrs = hstate_attrs,
};

static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
				    struct kobject **hstate_kobjs,
				    const struct attribute_group *hstate_attr_group)
{
	int retval;
	int hi = hstate_index(h);

	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
	if (!hstate_kobjs[hi])
		return -ENOMEM;

	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
	if (retval) {
		kobject_put(hstate_kobjs[hi]);
		hstate_kobjs[hi] = NULL;
	}

	return retval;
}

static void __init hugetlb_sysfs_init(void)
{
	struct hstate *h;
	int err;

	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
	if (!hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
					 hstate_kobjs, &hstate_attr_group);
		if (err)
			pr_err("HugeTLB: Unable to add hstate %s", h->name);
	}
}

#ifdef CONFIG_NUMA

/*
 * node_hstate/s - associate per node hstate attributes, via their kobjects,
 * with node devices in node_devices[] using a parallel array.  The array
 * index of a node device or _hstate == node id.
 * This is here to avoid any static dependency of the node device driver, in
 * the base kernel, on the hugetlb module.
 */
struct node_hstate {
	struct kobject		*hugepages_kobj;
	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
};
static struct node_hstate node_hstates[MAX_NUMNODES];

/*
 * A subset of global hstate attributes for node devices
 */
static struct attribute *per_node_hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
	NULL,
};

static const struct attribute_group per_node_hstate_attr_group = {
	.attrs = per_node_hstate_attrs,
};

/*
 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
 * Returns node id via non-NULL nidp.
 */
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	int nid;

	for (nid = 0; nid < nr_node_ids; nid++) {
		struct node_hstate *nhs = &node_hstates[nid];
		int i;
		for (i = 0; i < HUGE_MAX_HSTATE; i++)
			if (nhs->hstate_kobjs[i] == kobj) {
				if (nidp)
					*nidp = nid;
				return &hstates[i];
			}
	}

	BUG();
	return NULL;
}

/*
 * Unregister hstate attributes from a single node device.
 * No-op if no hstate attributes attached.
 */
static void hugetlb_unregister_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];

	if (!nhs->hugepages_kobj)
		return;		/* no hstate attributes */

	for_each_hstate(h) {
		int idx = hstate_index(h);
		if (nhs->hstate_kobjs[idx]) {
			kobject_put(nhs->hstate_kobjs[idx]);
			nhs->hstate_kobjs[idx] = NULL;
		}
	}

	kobject_put(nhs->hugepages_kobj);
	nhs->hugepages_kobj = NULL;
}


/*
 * Register hstate attributes for a single node device.
 * No-op if attributes already registered.
 */
static void hugetlb_register_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];
	int err;

	if (nhs->hugepages_kobj)
		return;		/* already allocated */

	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
							&node->dev.kobj);
	if (!nhs->hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
						nhs->hstate_kobjs,
						&per_node_hstate_attr_group);
		if (err) {
			pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
				h->name, node->dev.id);
			hugetlb_unregister_node(node);
			break;
		}
	}
}

/*
 * hugetlb init time:  register hstate attributes for all registered node
 * devices of nodes that have memory.  All on-line nodes should have
 * registered their associated device by this time.
 */
static void __init hugetlb_register_all_nodes(void)
{
	int nid;

	for_each_node_state(nid, N_MEMORY) {
		struct node *node = node_devices[nid];
		if (node->dev.id == nid)
			hugetlb_register_node(node);
	}

	/*
	 * Let the node device driver know we're here so it can
	 * [un]register hstate attributes on node hotplug.
	 */
	register_hugetlbfs_with_node(hugetlb_register_node,
				     hugetlb_unregister_node);
}
#else	/* !CONFIG_NUMA */

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	BUG();
	if (nidp)
		*nidp = -1;
	return NULL;
}

static void hugetlb_register_all_nodes(void) { }

#endif

static int __init hugetlb_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
			__NR_HPAGEFLAGS);

	if (!hugepages_supported()) {
		if (hugetlb_max_hstate || default_hstate_max_huge_pages)
			pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
		return 0;
	}

	/*
	 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
	 * architectures depend on setup being done here.
	 */
	hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
	if (!parsed_default_hugepagesz) {
		/*
		 * If we did not parse a default huge page size, set
		 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
		 * number of huge pages for this default size was implicitly
		 * specified, set that here as well.
		 * Note that the implicit setting will overwrite an explicit
		 * setting.  A warning will be printed in this case.
		 */
		default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
		if (default_hstate_max_huge_pages) {
			if (default_hstate.max_huge_pages) {
				char buf[32];

				string_get_size(huge_page_size(&default_hstate),
					1, STRING_UNITS_2, buf, 32);
				pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
					default_hstate.max_huge_pages, buf);
				pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
					default_hstate_max_huge_pages);
			}
			default_hstate.max_huge_pages =
				default_hstate_max_huge_pages;
		}
	}

	hugetlb_cma_check();
	hugetlb_init_hstates();
	gather_bootmem_prealloc();
	report_hugepages();

	hugetlb_sysfs_init();
	hugetlb_register_all_nodes();
	hugetlb_cgroup_file_init();

#ifdef CONFIG_SMP
	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
#else
	num_fault_mutexes = 1;
#endif
	hugetlb_fault_mutex_table =
		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
			      GFP_KERNEL);
	BUG_ON(!hugetlb_fault_mutex_table);

	for (i = 0; i < num_fault_mutexes; i++)
		mutex_init(&hugetlb_fault_mutex_table[i]);
	return 0;
}
subsys_initcall(hugetlb_init);

/* Overwritten by architectures with more huge page sizes */
bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
{
	return size == HPAGE_SIZE;
}
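
/*
 * Illustrative note: architectures with several huge page sizes override
 * this weak default; on x86-64, for instance, the override accepts 2MB
 * and, when gigantic pages are supported by the CPU, 1GB.
 */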

void __init hugetlb_add_hstate(unsigned int order)
{
	struct hstate *h;
	unsigned long i;

	if (size_to_hstate(PAGE_SIZE << order)) {
		return;
	}
	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order == 0);
	h = &hstates[hugetlb_max_hstate++];
	mutex_init(&h->resize_lock);
	h->order = order;
	h->mask = ~(huge_page_size(h) - 1);
	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
	INIT_LIST_HEAD(&h->hugepage_activelist);
	h->next_nid_to_alloc = first_memory_node;
	h->next_nid_to_free = first_memory_node;
	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
					huge_page_size(h)/1024);

	parsed_hstate = h;
}

/*
 * hugepages command line processing
 * hugepages normally follows a valid hugepagesz or default_hugepagesz
 * specification.  If not, ignore the hugepages value.  hugepages can also
 * be the first huge page command line option in which case it implicitly
 * specifies the number of huge pages for the default size.
 */
static int __init hugepages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;

	if (!parsed_valid_hugepagesz) {
		pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
		parsed_valid_hugepagesz = true;
		return 0;
	}

	/*
	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
	 * yet, so this hugepages= parameter goes to the "default hstate".
	 * Otherwise, it goes with the previously parsed hugepagesz or
	 * default_hugepagesz.
	 */
	else if (!hugetlb_max_hstate)
		mhp = &default_hstate_max_huge_pages;
	else
		mhp = &parsed_hstate->max_huge_pages;

	if (mhp == last_mhp) {
		pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
		return 0;
	}

	if (sscanf(s, "%lu", mhp) <= 0)
		*mhp = 0;

	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate gigantic hstates here early to still
	 * use the bootmem allocator.
	 */
	if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
		hugetlb_hstate_alloc_pages(parsed_hstate);

	last_mhp = mhp;

	return 1;
}
__setup("hugepages=", hugepages_setup);
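
/*
 * Examples (illustrative, following the parsing rules above):
 *
 *   hugepages=512                          512 default-size pages
 *   hugepagesz=2M hugepages=512            512 2MB pages
 *   hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4
 *                                          512 2MB pages and four 1GB pages
 */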

/*
 * hugepagesz command line processing
 * A specific huge page size can only be specified once with hugepagesz.
 * hugepagesz is followed by hugepages on the command line.  The global
 * variable 'parsed_valid_hugepagesz' is used to determine if prior
 * hugepagesz argument was valid.
 */
static int __init hugepagesz_setup(char *s)
{
	unsigned long size;
	struct hstate *h;

	parsed_valid_hugepagesz = false;
	size = (unsigned long)memparse(s, NULL);

	if (!arch_hugetlb_valid_size(size)) {
		pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
		return 0;
	}

	h = size_to_hstate(size);
	if (h) {
		/*
		 * hstate for this size already exists.  This is normally
		 * an error, but is allowed if the existing hstate is the
		 * default hstate.  More specifically, it is only allowed if
		 * the number of huge pages for the default hstate was not
		 * previously specified.
		 */
		if (!parsed_default_hugepagesz ||  h != &default_hstate ||
		    default_hstate.max_huge_pages) {
			pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
			return 0;
		}

		/*
		 * No need to call hugetlb_add_hstate() as hstate already
		 * exists.  But, do set parsed_hstate so that a following
		 * hugepages= parameter will be applied to this hstate.
		 */
		parsed_hstate = h;
		parsed_valid_hugepagesz = true;
		return 1;
	}

	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
	parsed_valid_hugepagesz = true;
	return 1;
}
__setup("hugepagesz=", hugepagesz_setup);

3491 3492 3493 3494
/*
 * default_hugepagesz command line input
 * Only one instance of default_hugepagesz allowed on command line.
 */
3495
static int __init default_hugepagesz_setup(char *s)
3496
{
3497 3498
	unsigned long size;

3499 3500 3501 3502 3503 3504
	parsed_valid_hugepagesz = false;
	if (parsed_default_hugepagesz) {
		pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
		return 0;
	}

3505 3506 3507
	size = (unsigned long)memparse(s, NULL);

	if (!arch_hugetlb_valid_size(size)) {
3508
		pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
3509 3510 3511
		return 0;
	}

3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530
	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
	parsed_valid_hugepagesz = true;
	parsed_default_hugepagesz = true;
	default_hstate_idx = hstate_index(size_to_hstate(size));

	/*
	 * The number of default huge pages (for this size) could have been
	 * specified as the first hugetlb parameter: hugepages=X.  If so,
	 * then default_hstate_max_huge_pages is set.  If the default huge
	 * page size is gigantic (>= MAX_ORDER), then the pages must be
	 * allocated here from bootmem allocator.
	 */
	if (default_hstate_max_huge_pages) {
		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
		if (hstate_is_gigantic(&default_hstate))
			hugetlb_hstate_alloc_pages(&default_hstate);
		default_hstate_max_huge_pages = 0;
	}

3531 3532
	return 1;
}
3533
__setup("default_hugepagesz=", default_hugepagesz_setup);
3534

3535
static unsigned int allowed_mems_nr(struct hstate *h)
3536 3537 3538
{
	int node;
	unsigned int nr = 0;
3539 3540 3541 3542 3543
	nodemask_t *mpol_allowed;
	unsigned int *array = h->free_huge_pages_node;
	gfp_t gfp_mask = htlb_alloc_mask(h);

	mpol_allowed = policy_nodemask_current(gfp_mask);
3544

3545
	for_each_node_mask(node, cpuset_current_mems_allowed) {
3546
		if (!mpol_allowed || node_isset(node, *mpol_allowed))
3547 3548
			nr += array[node];
	}
3549 3550 3551 3552 3553

	return nr;
}

#ifdef CONFIG_SYSCTL
3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569
static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
					  void *buffer, size_t *length,
					  loff_t *ppos, unsigned long *out)
{
	struct ctl_table dup_table;

	/*
	 * In order to avoid races with __do_proc_doulongvec_minmax(), we
	 * can duplicate the @table and alter the duplicate of it.
	 */
	dup_table = *table;
	dup_table.data = out;

	return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
}

3570 3571
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
			 struct ctl_table *table, int write,
3572
			 void *buffer, size_t *length, loff_t *ppos)
L
Linus Torvalds 已提交
3573
{
3574
	struct hstate *h = &default_hstate;
3575
	unsigned long tmp = h->max_huge_pages;
3576
	int ret;
3577

3578
	if (!hugepages_supported())
3579
		return -EOPNOTSUPP;
3580

3581 3582
	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
					     &tmp);
3583 3584
	if (ret)
		goto out;
3585

3586 3587 3588
	if (write)
		ret = __nr_hugepages_store_common(obey_mempolicy, h,
						  NUMA_NO_NODE, tmp, *length);
3589 3590
out:
	return ret;
L
Linus Torvalds 已提交
3591
}
3592

3593
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
3594
			  void *buffer, size_t *length, loff_t *ppos)
3595 3596 3597 3598 3599 3600 3601 3602
{

	return hugetlb_sysctl_handler_common(false, table, write,
							buffer, length, ppos);
}

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
3603
			  void *buffer, size_t *length, loff_t *ppos)
3604 3605 3606 3607 3608 3609
{
	return hugetlb_sysctl_handler_common(true, table, write,
							buffer, length, ppos);
}
#endif /* CONFIG_NUMA */

3610
int hugetlb_overcommit_handler(struct ctl_table *table, int write,
3611
		void *buffer, size_t *length, loff_t *ppos)
3612
{
3613
	struct hstate *h = &default_hstate;
3614
	unsigned long tmp;
3615
	int ret;
3616

3617
	if (!hugepages_supported())
3618
		return -EOPNOTSUPP;
3619

3620
	tmp = h->nr_overcommit_huge_pages;
3621

3622
	if (write && hstate_is_gigantic(h))
3623 3624
		return -EINVAL;

3625 3626
	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
					     &tmp);
3627 3628
	if (ret)
		goto out;
3629 3630

	if (write) {
3631
		spin_lock_irq(&hugetlb_lock);
3632
		h->nr_overcommit_huge_pages = tmp;
3633
		spin_unlock_irq(&hugetlb_lock);
3634
	}
3635 3636
out:
	return ret;
3637 3638
}

L
Linus Torvalds 已提交
3639 3640
#endif /* CONFIG_SYSCTL */

3641
void hugetlb_report_meminfo(struct seq_file *m)
L
Linus Torvalds 已提交
3642
{
3643 3644 3645
	struct hstate *h;
	unsigned long total = 0;

3646 3647
	if (!hugepages_supported())
		return;
3648 3649 3650 3651

	for_each_hstate(h) {
		unsigned long count = h->nr_huge_pages;

3652
		total += huge_page_size(h) * count;
3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664

		if (h == &default_hstate)
			seq_printf(m,
				   "HugePages_Total:   %5lu\n"
				   "HugePages_Free:    %5lu\n"
				   "HugePages_Rsvd:    %5lu\n"
				   "HugePages_Surp:    %5lu\n"
				   "Hugepagesize:   %8lu kB\n",
				   count,
				   h->free_huge_pages,
				   h->resv_huge_pages,
				   h->surplus_huge_pages,
3665
				   huge_page_size(h) / SZ_1K);
3666 3667
	}

3668
	seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
L
Linus Torvalds 已提交
3669 3670
}

3671
int hugetlb_report_node_meminfo(char *buf, int len, int nid)
L
Linus Torvalds 已提交
3672
{
3673
	struct hstate *h = &default_hstate;
3674

3675 3676
	if (!hugepages_supported())
		return 0;
3677 3678 3679 3680 3681 3682 3683 3684

	return sysfs_emit_at(buf, len,
			     "Node %d HugePages_Total: %5u\n"
			     "Node %d HugePages_Free:  %5u\n"
			     "Node %d HugePages_Surp:  %5u\n",
			     nid, h->nr_huge_pages_node[nid],
			     nid, h->free_huge_pages_node[nid],
			     nid, h->surplus_huge_pages_node[nid]);
L
Linus Torvalds 已提交
3685 3686
}

3687 3688 3689 3690 3691
void hugetlb_show_meminfo(void)
{
	struct hstate *h;
	int nid;

3692 3693 3694
	if (!hugepages_supported())
		return;

3695 3696 3697 3698 3699 3700 3701
	for_each_node_state(nid, N_MEMORY)
		for_each_hstate(h)
			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
				nid,
				h->nr_huge_pages_node[nid],
				h->free_huge_pages_node[nid],
				h->surplus_huge_pages_node[nid],
3702
				huge_page_size(h) / SZ_1K);
3703 3704
}

3705 3706 3707 3708 3709 3710
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
{
	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
}

L
Linus Torvalds 已提交
3711 3712 3713
/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
3714 3715 3716 3717 3718 3719
	struct hstate *h;
	unsigned long nr_total_pages = 0;

	for_each_hstate(h)
		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
	return nr_total_pages;
L
Linus Torvalds 已提交
3720 3721
}

3722
static int hugetlb_acct_memory(struct hstate *h, long delta)
M
Mel Gorman 已提交
3723 3724 3725
{
	int ret = -ENOMEM;

3726 3727 3728
	if (!delta)
		return 0;

3729
	spin_lock_irq(&hugetlb_lock);
M
Mel Gorman 已提交
3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. Application can still potentially OOM'ed by kernel
	 * with lack of free htlb page in cpuset that the task is in.
	 * Attempt to enforce strict accounting with cpuset is almost
	 * impossible (or too ugly) because cpuset is too fluid that
	 * task or memory node can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to check against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
3746 3747 3748 3749 3750 3751
	 *
	 * Apart from cpuset, we also have memory policy mechanism that
	 * also determines from which node the kernel will allocate memory
	 * in a NUMA system. So similar to cpuset, we also should consider
	 * the memory policy of the current task. Similar to the description
	 * above.
M
Mel Gorman 已提交
3752 3753
	 */
	if (delta > 0) {
3754
		if (gather_surplus_pages(h, delta) < 0)
M
Mel Gorman 已提交
3755 3756
			goto out;

3757
		if (delta > allowed_mems_nr(h)) {
3758
			return_unused_surplus_pages(h, delta);
M
Mel Gorman 已提交
3759 3760 3761 3762 3763 3764
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
3765
		return_unused_surplus_pages(h, (unsigned long) -delta);
M
Mel Gorman 已提交
3766 3767

out:
3768
	spin_unlock_irq(&hugetlb_lock);
M
Mel Gorman 已提交
3769 3770 3771
	return ret;
}

3772 3773
static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
3774
	struct resv_map *resv = vma_resv_map(vma);
3775 3776 3777 3778 3779

	/*
	 * This new VMA should share its siblings reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
L
Lucas De Marchi 已提交
3780
	 * has a reference to the reservation map it cannot disappear until
3781 3782 3783
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */
3784
	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3785
		kref_get(&resv->refs);
3786 3787
}

3788 3789
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
3790
	struct hstate *h = hstate_vma(vma);
3791
	struct resv_map *resv = vma_resv_map(vma);
3792
	struct hugepage_subpool *spool = subpool_vma(vma);
3793
	unsigned long reserve, start, end;
3794
	long gbl_reserve;
3795

3796 3797
	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return;
3798

3799 3800
	start = vma_hugecache_offset(h, vma, vma->vm_start);
	end = vma_hugecache_offset(h, vma, vma->vm_end);
3801

3802
	reserve = (end - start) - region_count(resv, start, end);
3803
	hugetlb_cgroup_uncharge_counter(resv, start, end);
3804
	if (reserve) {
3805 3806 3807 3808 3809 3810
		/*
		 * Decrement reserve counts.  The global reserve count may be
		 * adjusted if the subpool has a minimum size.
		 */
		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
		hugetlb_acct_memory(h, -gbl_reserve);
3811
	}
3812 3813

	kref_put(&resv->refs, resv_map_release);
3814 3815
}

3816 3817 3818 3819 3820 3821 3822
static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
{
	if (addr & ~(huge_page_mask(hstate_vma(vma))))
		return -EINVAL;
	return 0;
}

3823 3824
static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
{
3825
	return huge_page_size(hstate_vma(vma));
3826 3827
}

L
Linus Torvalds 已提交
3828 3829 3830
/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
M
Miaohe Lin 已提交
3831
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG is we get
L
Linus Torvalds 已提交
3832 3833
 * this far.
 */
3834
static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
L
Linus Torvalds 已提交
3835 3836
{
	BUG();
N
Nick Piggin 已提交
3837
	return 0;
L
Linus Torvalds 已提交
3838 3839
}

3840 3841 3842 3843 3844 3845 3846
/*
 * When a new function is introduced to vm_operations_struct and added
 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
 * This is because under System V memory model, mappings created via
 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
 * their original vm_ops are overwritten with shm_vm_ops.
 */
3847
const struct vm_operations_struct hugetlb_vm_ops = {
N
Nick Piggin 已提交
3848
	.fault = hugetlb_vm_op_fault,
3849
	.open = hugetlb_vm_op_open,
3850
	.close = hugetlb_vm_op_close,
3851
	.may_split = hugetlb_vm_op_split,
3852
	.pagesize = hugetlb_vm_op_pagesize,
L
Linus Torvalds 已提交
3853 3854
};

3855 3856
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
D
David Gibson 已提交
3857 3858 3859
{
	pte_t entry;

3860
	if (writable) {
3861 3862
		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
					 vma->vm_page_prot)));
D
David Gibson 已提交
3863
	} else {
3864 3865
		entry = huge_pte_wrprotect(mk_huge_pte(page,
					   vma->vm_page_prot));
D
David Gibson 已提交
3866 3867 3868
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);
3869
	entry = arch_make_huge_pte(entry, vma, page, writable);
D
David Gibson 已提交
3870 3871 3872 3873

	return entry;
}

3874 3875 3876 3877 3878
static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

3879
	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3880
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3881
		update_mmu_cache(vma, address, ptep);
3882 3883
}

3884
bool is_hugetlb_entry_migration(pte_t pte)
3885 3886 3887 3888
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
3889
		return false;
3890
	swp = pte_to_swp_entry(pte);
3891
	if (is_migration_entry(swp))
3892
		return true;
3893
	else
3894
		return false;
3895 3896
}

3897
static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
3898 3899 3900 3901
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
3902
		return false;
3903
	swp = pte_to_swp_entry(pte);
3904
	if (is_hwpoison_entry(swp))
3905
		return true;
3906
	else
3907
		return false;
3908
}
3909

3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921
static void
hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
		     struct page *new_page)
{
	__SetPageUptodate(new_page);
	set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
	hugepage_add_new_anon_rmap(new_page, vma, addr);
	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
	ClearHPageRestoreReserve(new_page);
	SetHPageMigratable(new_page);
}

D
David Gibson 已提交
3922 3923 3924
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
3925
	pte_t *src_pte, *dst_pte, entry, dst_entry;
D
David Gibson 已提交
3926
	struct page *ptepage;
3927
	unsigned long addr;
3928
	bool cow = is_cow_mapping(vma->vm_flags);
3929 3930
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
3931
	unsigned long npages = pages_per_huge_page(h);
3932
	struct address_space *mapping = vma->vm_file->f_mapping;
3933
	struct mmu_notifier_range range;
3934
	int ret = 0;
3935

3936
	if (cow) {
3937
		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
3938
					vma->vm_start,
3939 3940
					vma->vm_end);
		mmu_notifier_invalidate_range_start(&range);
3941 3942 3943 3944 3945 3946 3947 3948
	} else {
		/*
		 * For shared mappings i_mmap_rwsem must be held to call
		 * huge_pte_alloc, otherwise the returned ptep could go
		 * away if part of a shared pmd and another thread calls
		 * huge_pmd_unshare.
		 */
		i_mmap_lock_read(mapping);
3949
	}
3950

3951
	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3952
		spinlock_t *src_ptl, *dst_ptl;
3953
		src_pte = huge_pte_offset(src, addr, sz);
H
Hugh Dickins 已提交
3954 3955
		if (!src_pte)
			continue;
3956
		dst_pte = huge_pte_alloc(dst, vma, addr, sz);
3957 3958 3959 3960
		if (!dst_pte) {
			ret = -ENOMEM;
			break;
		}
3961

3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972
		/*
		 * If the pagetables are shared don't copy or take references.
		 * dst_pte == src_pte is the common case of src/dest sharing.
		 *
		 * However, src could have 'unshared' and dst shares with
		 * another vma.  If dst_pte !none, this implies sharing.
		 * Check here before taking page table lock, and once again
		 * after taking the lock below.
		 */
		dst_entry = huge_ptep_get(dst_pte);
		if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3973 3974
			continue;

3975 3976 3977
		dst_ptl = huge_pte_lock(h, dst, dst_pte);
		src_ptl = huge_pte_lockptr(h, src, src_pte);
		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3978
		entry = huge_ptep_get(src_pte);
3979
		dst_entry = huge_ptep_get(dst_pte);
3980
again:
3981 3982 3983 3984 3985 3986
		if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
			/*
			 * Skip if src entry none.  Also, skip in the
			 * unlikely case dst entry !none as this implies
			 * sharing with another vma.
			 */
3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998
			;
		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
				    is_hugetlb_entry_hwpoisoned(entry))) {
			swp_entry_t swp_entry = pte_to_swp_entry(entry);

			if (is_write_migration_entry(swp_entry) && cow) {
				/*
				 * COW mappings require pages in both
				 * parent and child to be set to read.
				 */
				make_migration_entry_read(&swp_entry);
				entry = swp_entry_to_pte(swp_entry);
3999 4000
				set_huge_swap_pte_at(src, addr, src_pte,
						     entry, sz);
4001
			}
4002
			set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
4003
		} else {
4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049
			entry = huge_ptep_get(src_pte);
			ptepage = pte_page(entry);
			get_page(ptepage);

			/*
			 * This is a rare case where we see pinned hugetlb
			 * pages while they're prone to COW.  We need to do the
			 * COW earlier during fork.
			 *
			 * When pre-allocating the page or copying data, we
			 * need to be without the pgtable locks since we could
			 * sleep during the process.
			 */
			if (unlikely(page_needs_cow_for_dma(vma, ptepage))) {
				pte_t src_pte_old = entry;
				struct page *new;

				spin_unlock(src_ptl);
				spin_unlock(dst_ptl);
				/* Do not use reserve as it's private owned */
				new = alloc_huge_page(vma, addr, 1);
				if (IS_ERR(new)) {
					put_page(ptepage);
					ret = PTR_ERR(new);
					break;
				}
				copy_user_huge_page(new, ptepage, addr, vma,
						    npages);
				put_page(ptepage);

				/* Install the new huge page if src pte stable */
				dst_ptl = huge_pte_lock(h, dst, dst_pte);
				src_ptl = huge_pte_lockptr(h, src, src_pte);
				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
				entry = huge_ptep_get(src_pte);
				if (!pte_same(src_pte_old, entry)) {
					put_page(new);
					/* dst_entry won't change as in child */
					goto again;
				}
				hugetlb_install_page(vma, dst_pte, addr, new);
				spin_unlock(src_ptl);
				spin_unlock(dst_ptl);
				continue;
			}

4050
			if (cow) {
4051 4052 4053 4054 4055
				/*
				 * No need to notify as we are downgrading page
				 * table protection not changing it to point
				 * to a new page.
				 *
4056
				 * See Documentation/vm/mmu_notifier.rst
4057
				 */
4058
				huge_ptep_set_wrprotect(src, addr, src_pte);
4059
				entry = huge_pte_wrprotect(entry);
4060
			}
4061

4062
			page_dup_rmap(ptepage, true);
4063
			set_huge_pte_at(dst, addr, dst_pte, entry);
4064
			hugetlb_count_add(npages, dst);
4065
		}
4066 4067
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
D
David Gibson 已提交
4068 4069
	}

4070
	if (cow)
4071
		mmu_notifier_invalidate_range_end(&range);
4072 4073
	else
		i_mmap_unlock_read(mapping);
4074 4075

	return ret;
D
David Gibson 已提交
4076 4077
}

4078 4079 4080
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page)
D
David Gibson 已提交
4081 4082 4083
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
4084
	pte_t *ptep;
D
David Gibson 已提交
4085
	pte_t pte;
4086
	spinlock_t *ptl;
D
David Gibson 已提交
4087
	struct page *page;
4088 4089
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
4090
	struct mmu_notifier_range range;
4091

D
David Gibson 已提交
4092
	WARN_ON(!is_vm_hugetlb_page(vma));
4093 4094
	BUG_ON(start & ~huge_page_mask(h));
	BUG_ON(end & ~huge_page_mask(h));
D
David Gibson 已提交
4095

4096 4097 4098 4099
	/*
	 * This is a hugetlb vma, all the pte entries should point
	 * to huge page.
	 */
4100
	tlb_change_page_size(tlb, sz);
4101
	tlb_start_vma(tlb, vma);
4102 4103 4104 4105

	/*
	 * If sharing possible, alert mmu notifiers of worst case.
	 */
4106 4107
	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
				end);
4108 4109
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
	mmu_notifier_invalidate_range_start(&range);
4110 4111
	address = start;
	for (; address < end; address += sz) {
4112
		ptep = huge_pte_offset(mm, address, sz);
A
Adam Litke 已提交
4113
		if (!ptep)
4114 4115
			continue;

4116
		ptl = huge_pte_lock(h, mm, ptep);
4117
		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
4118
			spin_unlock(ptl);
4119 4120 4121 4122
			/*
			 * We just unmapped a page of PMDs by clearing a PUD.
			 * The caller's TLB flush range should cover this area.
			 */
4123 4124
			continue;
		}
4125

4126
		pte = huge_ptep_get(ptep);
4127 4128 4129 4130
		if (huge_pte_none(pte)) {
			spin_unlock(ptl);
			continue;
		}
4131 4132

		/*
4133 4134
		 * Migrating hugepage or HWPoisoned hugepage is already
		 * unmapped and its refcount is dropped, so just clear pte here.
4135
		 */
4136
		if (unlikely(!pte_present(pte))) {
4137
			huge_pte_clear(mm, address, ptep, sz);
4138 4139
			spin_unlock(ptl);
			continue;
4140
		}
4141 4142

		page = pte_page(pte);
4143 4144 4145 4146 4147 4148
		/*
		 * If a reference page is supplied, it is because a specific
		 * page is being unmapped, not a range. Ensure the page we
		 * are about to unmap is the actual page of interest.
		 */
		if (ref_page) {
4149 4150 4151 4152
			if (page != ref_page) {
				spin_unlock(ptl);
				continue;
			}
4153 4154 4155 4156 4157 4158 4159 4160
			/*
			 * Mark the VMA as having unmapped its page so that
			 * future faults in this VMA will fail rather than
			 * looking like data was lost
			 */
			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
		}

4161
		pte = huge_ptep_get_and_clear(mm, address, ptep);
4162
		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
4163
		if (huge_pte_dirty(pte))
4164
			set_page_dirty(page);
4165

4166
		hugetlb_count_sub(pages_per_huge_page(h), mm);
4167
		page_remove_rmap(page, true);
4168

4169
		spin_unlock(ptl);
4170
		tlb_remove_page_size(tlb, page, huge_page_size(h));
4171 4172 4173 4174 4175
		/*
		 * Bail out after unmapping reference page if supplied
		 */
		if (ref_page)
			break;
4176
	}
4177
	mmu_notifier_invalidate_range_end(&range);
4178
	tlb_end_vma(tlb, vma);
L
Linus Torvalds 已提交
4179
}
D
David Gibson 已提交
4180

4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	__unmap_hugepage_range(tlb, vma, start, end, ref_page);

	/*
	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
	 * test will fail on a vma being torn down, and not grab a page table
	 * on its way out.  We're lucky that the flag has such an appropriate
	 * name, and can in fact be safely cleared here. We could clear it
	 * before the __unmap_hugepage_range above, but all that's necessary
4193
	 * is to clear it before releasing the i_mmap_rwsem. This works
4194
	 * because in the context this is called, the VMA is about to be
4195
	 * destroyed and the i_mmap_rwsem is held.
4196 4197 4198 4199
	 */
	vma->vm_flags &= ~VM_MAYSHARE;
}

4200
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
4201
			  unsigned long end, struct page *ref_page)
4202
{
4203
	struct mmu_gather tlb;
4204

4205
	tlb_gather_mmu(&tlb, vma->vm_mm);
4206
	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
4207
	tlb_finish_mmu(&tlb);
4208 4209
}

4210 4211
/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
4212
 * mapping it owns the reserve page for. The intention is to unmap the page
4213 4214 4215
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
4216 4217
static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
			      struct page *page, unsigned long address)
4218
{
4219
	struct hstate *h = hstate_vma(vma);
4220 4221 4222 4223 4224 4225 4226 4227
	struct vm_area_struct *iter_vma;
	struct address_space *mapping;
	pgoff_t pgoff;

	/*
	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
	 * from page cache lookup which is in HPAGE_SIZE units.
	 */
4228
	address = address & huge_page_mask(h);
4229 4230
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
4231
	mapping = vma->vm_file->f_mapping;
4232

4233 4234 4235 4236 4237
	/*
	 * Take the mapping lock for the duration of the table walk. As
	 * this mapping should be shared between all the VMAs,
	 * __unmap_hugepage_range() is called as the lock is already held
	 */
4238
	i_mmap_lock_write(mapping);
4239
	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
4240 4241 4242 4243
		/* Do not unmap the current VMA */
		if (iter_vma == vma)
			continue;

4244 4245 4246 4247 4248 4249 4250 4251
		/*
		 * Shared VMAs have their own reserves and do not affect
		 * MAP_PRIVATE accounting but it is possible that a shared
		 * VMA is using the same page so check and skip such VMAs.
		 */
		if (iter_vma->vm_flags & VM_MAYSHARE)
			continue;

4252 4253 4254 4255 4256 4257 4258 4259
		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
4260 4261
			unmap_hugepage_range(iter_vma, address,
					     address + huge_page_size(h), page);
4262
	}
4263
	i_mmap_unlock_write(mapping);
4264 4265
}

4266 4267
/*
 * Hugetlb_cow() should be called with page lock of the original hugepage held.
4268 4269 4270
 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
 * cannot race with other handlers or page migration.
 * Keep the pte_same checks anyway to make transition from the mutex easier.
4271
 */
4272
static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
4273
		       unsigned long address, pte_t *ptep,
4274
		       struct page *pagecache_page, spinlock_t *ptl)
4275
{
4276
	pte_t pte;
4277
	struct hstate *h = hstate_vma(vma);
4278
	struct page *old_page, *new_page;
4279 4280
	int outside_reserve = 0;
	vm_fault_t ret = 0;
4281
	unsigned long haddr = address & huge_page_mask(h);
4282
	struct mmu_notifier_range range;
4283

4284
	pte = huge_ptep_get(ptep);
4285 4286
	old_page = pte_page(pte);

4287
retry_avoidcopy:
4288 4289
	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
4290
	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
4291
		page_move_anon_rmap(old_page, vma);
4292
		set_huge_ptep_writable(vma, haddr, ptep);
N
Nick Piggin 已提交
4293
		return 0;
4294 4295
	}

4296 4297 4298 4299 4300 4301 4302 4303 4304
	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partial faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
4305
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
4306 4307 4308
			old_page != pagecache_page)
		outside_reserve = 1;

4309
	get_page(old_page);
4310

4311 4312 4313 4314
	/*
	 * Drop page table lock as buddy allocator may be called. It will
	 * be acquired again before returning to the caller, as expected.
	 */
4315
	spin_unlock(ptl);
4316
	new_page = alloc_huge_page(vma, haddr, outside_reserve);
4317

4318
	if (IS_ERR(new_page)) {
4319 4320 4321 4322 4323 4324 4325 4326
		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool. To guarantee the original mappers
		 * reliability, unmap the page from child processes. The child
		 * may get SIGKILLed if it later faults.
		 */
		if (outside_reserve) {
4327 4328 4329 4330
			struct address_space *mapping = vma->vm_file->f_mapping;
			pgoff_t idx;
			u32 hash;

4331
			put_page(old_page);
4332
			BUG_ON(huge_pte_none(pte));
4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346
			/*
			 * Drop hugetlb_fault_mutex and i_mmap_rwsem before
			 * unmapping.  unmapping needs to hold i_mmap_rwsem
			 * in write mode.  Dropping i_mmap_rwsem in read mode
			 * here is OK as COW mappings do not interact with
			 * PMD sharing.
			 *
			 * Reacquire both after unmap operation.
			 */
			idx = vma_hugecache_offset(h, vma, haddr);
			hash = hugetlb_fault_mutex_hash(mapping, idx);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);

4347
			unmap_ref_private(mm, vma, old_page, haddr);
4348 4349 4350

			i_mmap_lock_read(mapping);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);
4351
			spin_lock(ptl);
4352
			ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4353 4354 4355 4356 4357 4358 4359 4360
			if (likely(ptep &&
				   pte_same(huge_ptep_get(ptep), pte)))
				goto retry_avoidcopy;
			/*
			 * race occurs while re-acquiring page table
			 * lock, and our job is done.
			 */
			return 0;
4361 4362
		}

4363
		ret = vmf_error(PTR_ERR(new_page));
4364
		goto out_release_old;
4365 4366
	}

4367 4368 4369 4370
	/*
	 * When the original hugepage is shared one, it does not have
	 * anon_vma prepared.
	 */
4371
	if (unlikely(anon_vma_prepare(vma))) {
4372 4373
		ret = VM_FAULT_OOM;
		goto out_release_all;
4374
	}
4375

4376
	copy_user_huge_page(new_page, old_page, address, vma,
A
Andrea Arcangeli 已提交
4377
			    pages_per_huge_page(h));
N
Nick Piggin 已提交
4378
	__SetPageUptodate(new_page);
4379

4380
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
4381
				haddr + huge_page_size(h));
4382
	mmu_notifier_invalidate_range_start(&range);
4383

4384
	/*
4385
	 * Retake the page table lock to check for racing updates
4386 4387
	 * before the page tables are altered
	 */
4388
	spin_lock(ptl);
4389
	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4390
	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
4391
		ClearHPageRestoreReserve(new_page);
4392

4393
		/* Break COW */
4394
		huge_ptep_clear_flush(vma, haddr, ptep);
4395
		mmu_notifier_invalidate_range(mm, range.start, range.end);
4396
		set_huge_pte_at(mm, haddr, ptep,
4397
				make_huge_pte(vma, new_page, 1));
4398
		page_remove_rmap(old_page, true);
4399
		hugepage_add_new_anon_rmap(new_page, vma, haddr);
4400
		SetHPageMigratable(new_page);
4401 4402 4403
		/* Make the old page be freed below */
		new_page = old_page;
	}
4404
	spin_unlock(ptl);
4405
	mmu_notifier_invalidate_range_end(&range);
4406
out_release_all:
4407
	restore_reserve_on_error(h, vma, haddr, new_page);
4408
	put_page(new_page);
4409
out_release_old:
4410
	put_page(old_page);
4411

4412 4413
	spin_lock(ptl); /* Caller expects lock to be held */
	return ret;
4414 4415
}

4416
/* Return the pagecache page at a given address within a VMA */
4417 4418
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
4419 4420
{
	struct address_space *mapping;
4421
	pgoff_t idx;
4422 4423

	mapping = vma->vm_file->f_mapping;
4424
	idx = vma_hugecache_offset(h, vma, address);
4425 4426 4427 4428

	return find_lock_page(mapping, idx);
}

H
Hugh Dickins 已提交
4429 4430 4431 4432 4433
/*
 * Return whether there is a pagecache page to back given address within VMA.
 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
 */
static bool hugetlbfs_pagecache_present(struct hstate *h,
H
Hugh Dickins 已提交
4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;
	struct page *page;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	page = find_get_page(mapping, idx);
	if (page)
		put_page(page);
	return page != NULL;
}

4449 4450 4451 4452 4453 4454 4455 4456 4457
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			   pgoff_t idx)
{
	struct inode *inode = mapping->host;
	struct hstate *h = hstate_inode(inode);
	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);

	if (err)
		return err;
4458
	ClearHPageRestoreReserve(page);
4459

4460 4461 4462 4463 4464 4465
	/*
	 * set page dirty so that it will not be removed from cache/file
	 * by non-hugetlbfs specific code paths.
	 */
	set_page_dirty(page);

4466 4467 4468 4469 4470 4471
	spin_lock(&inode->i_lock);
	inode->i_blocks += blocks_per_huge_page(h);
	spin_unlock(&inode->i_lock);
	return 0;
}

4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509
static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
						  struct address_space *mapping,
						  pgoff_t idx,
						  unsigned int flags,
						  unsigned long haddr,
						  unsigned long reason)
{
	vm_fault_t ret;
	u32 hash;
	struct vm_fault vmf = {
		.vma = vma,
		.address = haddr,
		.flags = flags,

		/*
		 * Hard to debug if it ends up being
		 * used by a callee that assumes
		 * something about the other
		 * uninitialized fields... same as in
		 * memory.c
		 */
	};

	/*
	 * hugetlb_fault_mutex and i_mmap_rwsem must be
	 * dropped before handling userfault.  Reacquire
	 * after handling fault to make calling code simpler.
	 */
	hash = hugetlb_fault_mutex_hash(mapping, idx);
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	i_mmap_unlock_read(mapping);
	ret = handle_userfault(&vmf, reason);
	i_mmap_lock_read(mapping);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);

	return ret;
}

4510 4511 4512 4513
static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			struct address_space *mapping, pgoff_t idx,
			unsigned long address, pte_t *ptep, unsigned int flags)
4514
{
4515
	struct hstate *h = hstate_vma(vma);
4516
	vm_fault_t ret = VM_FAULT_SIGBUS;
4517
	int anon_rmap = 0;
A
Adam Litke 已提交
4518 4519
	unsigned long size;
	struct page *page;
4520
	pte_t new_pte;
4521
	spinlock_t *ptl;
4522
	unsigned long haddr = address & huge_page_mask(h);
4523
	bool new_page = false;
A
Adam Litke 已提交
4524

4525 4526 4527
	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
L
Lucas De Marchi 已提交
4528
	 * COW. Warn that such a situation has occurred as it may not be obvious
4529 4530
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
4531
		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
4532
			   current->pid);
4533 4534 4535
		return ret;
	}

A
Adam Litke 已提交
4536
	/*
4537 4538 4539
	 * We can not race with truncation due to holding i_mmap_rwsem.
	 * i_size is modified when holding i_mmap_rwsem, so check here
	 * once for faults beyond end of file.
A
Adam Litke 已提交
4540
	 */
4541 4542 4543 4544
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto out;

4545 4546 4547
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
4548
		/* Check for page in userfault range */
4549
		if (userfaultfd_missing(vma)) {
4550 4551 4552
			ret = hugetlb_handle_userfault(vma, mapping, idx,
						       flags, haddr,
						       VM_UFFD_MISSING);
4553 4554 4555
			goto out;
		}

4556
		page = alloc_huge_page(vma, haddr, 0);
4557
		if (IS_ERR(page)) {
4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570
			/*
			 * Returning error will result in faulting task being
			 * sent SIGBUS.  The hugetlb fault mutex prevents two
			 * tasks from racing to fault in the same page which
			 * could result in false unable to allocate errors.
			 * Page migration does not take the fault mutex, but
			 * does a clear then write of pte's under page table
			 * lock.  Page fault code could race with migration,
			 * notice the clear pte and try to allocate a page
			 * here.  Before returning error, get ptl and make
			 * sure there really is no pte entry.
			 */
			ptl = huge_pte_lock(h, mm, ptep);
4571 4572 4573
			ret = 0;
			if (huge_pte_none(huge_ptep_get(ptep)))
				ret = vmf_error(PTR_ERR(page));
4574
			spin_unlock(ptl);
4575 4576
			goto out;
		}
A
Andrea Arcangeli 已提交
4577
		clear_huge_page(page, address, pages_per_huge_page(h));
N
Nick Piggin 已提交
4578
		__SetPageUptodate(page);
4579
		new_page = true;
4580

4581
		if (vma->vm_flags & VM_MAYSHARE) {
4582
			int err = huge_add_to_page_cache(page, mapping, idx);
4583 4584 4585 4586 4587 4588
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
4589
		} else {
4590
			lock_page(page);
4591 4592 4593 4594
			if (unlikely(anon_vma_prepare(vma))) {
				ret = VM_FAULT_OOM;
				goto backout_unlocked;
			}
4595
			anon_rmap = 1;
4596
		}
4597
	} else {
4598 4599 4600 4601 4602 4603
		/*
		 * If memory error occurs between mmap() and fault, some process
		 * don't have hwpoisoned swap entry for errored virtual address.
		 * So we need to block hugepage fault by PG_hwpoison bit check.
		 */
		if (unlikely(PageHWPoison(page))) {
4604
			ret = VM_FAULT_HWPOISON_LARGE |
4605
				VM_FAULT_SET_HINDEX(hstate_index(h));
4606 4607
			goto backout_unlocked;
		}
4608 4609 4610 4611 4612 4613 4614 4615 4616 4617

		/* Check for page in userfault range. */
		if (userfaultfd_minor(vma)) {
			unlock_page(page);
			put_page(page);
			ret = hugetlb_handle_userfault(vma, mapping, idx,
						       flags, haddr,
						       VM_UFFD_MINOR);
			goto out;
		}
4618
	}
4619

4620 4621 4622 4623 4624 4625
	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now. This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
4626
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
4627
		if (vma_needs_reservation(h, vma, haddr) < 0) {
4628 4629 4630
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}
4631
		/* Just decrements count, does not deallocate */
4632
		vma_end_reservation(h, vma, haddr);
4633
	}
4634

4635
	ptl = huge_pte_lock(h, mm, ptep);
N
Nick Piggin 已提交
4636
	ret = 0;
4637
	if (!huge_pte_none(huge_ptep_get(ptep)))
A
Adam Litke 已提交
4638 4639
		goto backout;

4640
	if (anon_rmap) {
4641
		ClearHPageRestoreReserve(page);
4642
		hugepage_add_new_anon_rmap(page, vma, haddr);
4643
	} else
4644
		page_dup_rmap(page, true);
4645 4646
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
4647
	set_huge_pte_at(mm, haddr, ptep, new_pte);
4648

4649
	hugetlb_count_add(pages_per_huge_page(h), mm);
4650
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
4651
		/* Optimization, do the COW without a second fault */
4652
		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
4653 4654
	}

4655
	spin_unlock(ptl);
4656 4657

	/*
4658 4659 4660
	 * Only set HPageMigratable in newly allocated pages.  Existing pages
	 * found in the pagecache may not have HPageMigratableset if they have
	 * been isolated for migration.
4661 4662
	 */
	if (new_page)
4663
		SetHPageMigratable(page);
4664

A
Adam Litke 已提交
4665 4666
	unlock_page(page);
out:
4667
	return ret;
A
Adam Litke 已提交
4668 4669

backout:
4670
	spin_unlock(ptl);
4671
backout_unlocked:
A
Adam Litke 已提交
4672
	unlock_page(page);
4673
	restore_reserve_on_error(h, vma, haddr, page);
A
Adam Litke 已提交
4674 4675
	put_page(page);
	goto out;
4676 4677
}

4678
#ifdef CONFIG_SMP
4679
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
4680 4681 4682 4683
{
	unsigned long key[2];
	u32 hash;

4684 4685
	key[0] = (unsigned long) mapping;
	key[1] = idx;
4686

4687
	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
4688 4689 4690 4691 4692

	return hash & (num_fault_mutexes - 1);
}
#else
/*
M
Miaohe Lin 已提交
4693
 * For uniprocessor systems we always use a single mutex, so just
4694 4695
 * return 0 and avoid the hashing overhead.
 */
4696
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
4697 4698 4699 4700 4701
{
	return 0;
}
#endif

4702
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
4703
			unsigned long address, unsigned int flags)
4704
{
4705
	pte_t *ptep, entry;
4706
	spinlock_t *ptl;
4707
	vm_fault_t ret;
4708 4709
	u32 hash;
	pgoff_t idx;
4710
	struct page *page = NULL;
4711
	struct page *pagecache_page = NULL;
4712
	struct hstate *h = hstate_vma(vma);
4713
	struct address_space *mapping;
4714
	int need_wait_lock = 0;
4715
	unsigned long haddr = address & huge_page_mask(h);
4716

4717
	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4718
	if (ptep) {
4719 4720 4721 4722 4723
		/*
		 * Since we hold no locks, ptep could be stale.  That is
		 * OK as we are only making decisions based on content and
		 * not actually modifying content here.
		 */
4724
		entry = huge_ptep_get(ptep);
N
Naoya Horiguchi 已提交
4725
		if (unlikely(is_hugetlb_entry_migration(entry))) {
4726
			migration_entry_wait_huge(vma, mm, ptep);
N
Naoya Horiguchi 已提交
4727 4728
			return 0;
		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
4729
			return VM_FAULT_HWPOISON_LARGE |
4730
				VM_FAULT_SET_HINDEX(hstate_index(h));
4731 4732
	}

4733 4734
	/*
	 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
4735 4736 4737 4738
	 * until finished with ptep.  This serves two purposes:
	 * 1) It prevents huge_pmd_unshare from being called elsewhere
	 *    and making the ptep no longer valid.
	 * 2) It synchronizes us with i_size modifications during truncation.
4739 4740 4741 4742 4743
	 *
	 * ptep could have already be assigned via huge_pte_offset.  That
	 * is OK, as huge_pte_alloc will return the same value unless
	 * something has changed.
	 */
4744
	mapping = vma->vm_file->f_mapping;
4745
	i_mmap_lock_read(mapping);
4746
	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
4747 4748 4749 4750
	if (!ptep) {
		i_mmap_unlock_read(mapping);
		return VM_FAULT_OOM;
	}
4751

4752 4753 4754 4755 4756
	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
4757
	idx = vma_hugecache_offset(h, vma, haddr);
4758
	hash = hugetlb_fault_mutex_hash(mapping, idx);
4759
	mutex_lock(&hugetlb_fault_mutex_table[hash]);
4760

4761 4762
	entry = huge_ptep_get(ptep);
	if (huge_pte_none(entry)) {
4763
		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
4764
		goto out_mutex;
4765
	}
4766

N
Nick Piggin 已提交
4767
	ret = 0;
4768

4769 4770 4771
	/*
	 * entry could be a migration/hwpoison entry at this point, so this
	 * check prevents the kernel from going below assuming that we have
E
Ethon Paul 已提交
4772 4773 4774
	 * an active hugepage in pagecache. This goto expects the 2nd page
	 * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will
	 * properly handle it.
4775 4776 4777 4778
	 */
	if (!pte_present(entry))
		goto out_mutex;

4779 4780 4781 4782 4783 4784 4785 4786
	/*
	 * If we are going to COW the mapping later, we examine the pending
	 * reservations for this page now. This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock. For private mappings, we also lookup the pagecache
	 * page now as it is used to determine if a reservation has been
	 * consumed.
	 */
4787
	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
4788
		if (vma_needs_reservation(h, vma, haddr) < 0) {
4789
			ret = VM_FAULT_OOM;
4790
			goto out_mutex;
4791
		}
4792
		/* Just decrements count, does not deallocate */
4793
		vma_end_reservation(h, vma, haddr);
4794

4795
		if (!(vma->vm_flags & VM_MAYSHARE))
4796
			pagecache_page = hugetlbfs_pagecache_page(h,
4797
								vma, haddr);
4798 4799
	}

4800 4801 4802 4803 4804 4805
	ptl = huge_pte_lock(h, mm, ptep);

	/* Check for a racing update before calling hugetlb_cow */
	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
		goto out_ptl;

4806 4807 4808 4809 4810 4811 4812
	/*
	 * hugetlb_cow() requires page locks of pte_page(entry) and
	 * pagecache_page, so here we need take the former one
	 * when page != pagecache_page or !pagecache_page.
	 */
	page = pte_page(entry);
	if (page != pagecache_page)
4813 4814 4815 4816
		if (!trylock_page(page)) {
			need_wait_lock = 1;
			goto out_ptl;
		}
4817

4818
	get_page(page);
4819

4820
	if (flags & FAULT_FLAG_WRITE) {
4821
		if (!huge_pte_write(entry)) {
4822
			ret = hugetlb_cow(mm, vma, address, ptep,
4823
					  pagecache_page, ptl);
4824
			goto out_put_page;
4825
		}
4826
		entry = huge_pte_mkdirty(entry);
4827 4828
	}
	entry = pte_mkyoung(entry);
4829
	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4830
						flags & FAULT_FLAG_WRITE))
4831
		update_mmu_cache(vma, haddr, ptep);
4832 4833 4834 4835
out_put_page:
	if (page != pagecache_page)
		unlock_page(page);
	put_page(page);
4836 4837
out_ptl:
	spin_unlock(ptl);
4838 4839 4840 4841 4842

	if (pagecache_page) {
		unlock_page(pagecache_page);
		put_page(pagecache_page);
	}
4843
out_mutex:
4844
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4845
	i_mmap_unlock_read(mapping);
4846 4847 4848 4849 4850 4851 4852 4853 4854
	/*
	 * Generally it's safe to hold refcount during waiting page lock. But
	 * here we just wait to defer the next page fault to avoid busy loop and
	 * the page is not used after unlocked before returning from the current
	 * page fault. So we are safe from accessing freed page, even if we wait
	 * here without taking refcount.
	 */
	if (need_wait_lock)
		wait_on_page_locked(page);
4855
	return ret;
4856 4857
}

4858
#ifdef CONFIG_USERFAULTFD
4859 4860 4861 4862 4863 4864 4865 4866 4867
/*
 * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
 * modifications for huge pages.
 */
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pte_t *dst_pte,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
4868
			    enum mcopy_atomic_mode mode,
4869 4870
			    struct page **pagep)
{
4871
	bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
4872 4873 4874
	struct address_space *mapping;
	pgoff_t idx;
	unsigned long size;
4875
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
4876 4877 4878 4879 4880
	struct hstate *h = hstate_vma(dst_vma);
	pte_t _dst_pte;
	spinlock_t *ptl;
	int ret;
	struct page *page;
4881
	int writable;
4882

4883 4884 4885 4886 4887 4888 4889 4890 4891
	mapping = dst_vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, dst_vma, dst_addr);

	if (is_continue) {
		ret = -EFAULT;
		page = find_lock_page(mapping, idx);
		if (!page)
			goto out;
	} else if (!*pagep) {
4892 4893 4894 4895 4896 4897 4898 4899 4900
		/* If a page already exists, then it's UFFDIO_COPY for
		 * a non-missing case. Return -EEXIST.
		 */
		if (vm_shared &&
		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
			ret = -EEXIST;
			goto out;
		}

4901
		page = alloc_huge_page(dst_vma, dst_addr, 0);
4902 4903
		if (IS_ERR(page)) {
			ret = -ENOMEM;
4904
			goto out;
4905
		}
4906 4907 4908

		ret = copy_huge_page_from_user(page,
						(const void __user *) src_addr,
4909
						pages_per_huge_page(h), false);
4910

4911
		/* fallback to copy_from_user outside mmap_lock */
4912
		if (unlikely(ret)) {
4913
			ret = -ENOENT;
4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

4930 4931
	/* Add shared, newly allocated pages to the page cache. */
	if (vm_shared && !is_continue) {
4932 4933 4934 4935
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		ret = -EFAULT;
		if (idx >= size)
			goto out_release_nounlock;
4936

4937 4938 4939 4940 4941 4942
		/*
		 * Serialization between remove_inode_hugepages() and
		 * huge_add_to_page_cache() below happens through the
		 * hugetlb_fault_mutex_table that here must be hold by
		 * the caller.
		 */
4943 4944 4945 4946 4947
		ret = huge_add_to_page_cache(page, mapping, idx);
		if (ret)
			goto out_release_nounlock;
	}

4948 4949 4950
	ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
	spin_lock(ptl);

4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964
	/*
	 * Recheck the i_size after holding PT lock to make sure not
	 * to leave any page mapped (as page_mapped()) beyond the end
	 * of the i_size (remove_inode_hugepages() is strict about
	 * enforcing that). If we bail out here, we'll also leave a
	 * page in the radix tree in the vm_shared case beyond the end
	 * of the i_size, but remove_inode_hugepages() will take care
	 * of it as soon as we drop the hugetlb_fault_mutex_table.
	 */
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	ret = -EFAULT;
	if (idx >= size)
		goto out_release_unlock;

4965 4966 4967 4968
	ret = -EEXIST;
	if (!huge_pte_none(huge_ptep_get(dst_pte)))
		goto out_release_unlock;

4969 4970 4971
	if (vm_shared) {
		page_dup_rmap(page, true);
	} else {
4972
		ClearHPageRestoreReserve(page);
4973 4974
		hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
	}
4975

4976 4977 4978 4979 4980 4981 4982 4983
	/* For CONTINUE on a non-shared VMA, don't set VM_WRITE for CoW. */
	if (is_continue && !vm_shared)
		writable = 0;
	else
		writable = dst_vma->vm_flags & VM_WRITE;

	_dst_pte = make_huge_pte(dst_vma, page, writable);
	if (writable)
4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996
		_dst_pte = huge_pte_mkdirty(_dst_pte);
	_dst_pte = pte_mkyoung(_dst_pte);

	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	(void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
					dst_vma->vm_flags & VM_WRITE);
	hugetlb_count_add(pages_per_huge_page(h), dst_mm);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	spin_unlock(ptl);
4997 4998 4999
	if (!is_continue)
		SetHPageMigratable(page);
	if (vm_shared || is_continue)
5000
		unlock_page(page);
5001 5002 5003 5004 5005
	ret = 0;
out:
	return ret;
out_release_unlock:
	spin_unlock(ptl);
5006
	if (vm_shared || is_continue)
5007
		unlock_page(page);
5008
out_release_nounlock:
5009 5010 5011
	put_page(page);
	goto out;
}
5012
#endif /* CONFIG_USERFAULTFD */
5013

5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027
static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
				 int refs, struct page **pages,
				 struct vm_area_struct **vmas)
{
	int nr;

	for (nr = 0; nr < refs; nr++) {
		if (likely(pages))
			pages[nr] = mem_map_offset(page, nr);
		if (vmas)
			vmas[nr] = vma;
	}
}

long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			 struct page **pages, struct vm_area_struct **vmas,
			 unsigned long *position, unsigned long *nr_pages,
			 long i, unsigned int flags, int *locked)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	unsigned long remainder = *nr_pages;
	struct hstate *h = hstate_vma(vma);
	int err = -EFAULT, refs;

	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		spinlock_t *ptl = NULL;
		int absent;
		struct page *page;

		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {
			remainder = 0;
			break;
		}

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts for
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 *
		 * Note that page table lock is not held when pte is null.
		 */
		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
				      huge_page_size(h));
		if (pte)
			ptl = huge_pte_lock(h, mm, pte);
		absent = !pte || huge_pte_none(huge_ptep_get(pte));

		/*
		 * When coredumping, it suits get_dump_page if we just return
		 * an error where there's an empty slot with no huge pagecache
		 * to back it.  This way, we avoid allocating a hugepage, and
		 * the sparse dumpfile avoids allocating disk blocks, but its
		 * huge holes still show up with zeroes where they need to be.
		 */
		if (absent && (flags & FOLL_DUMP) &&
		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
			if (pte)
				spin_unlock(ptl);
			remainder = 0;
			break;
		}

		/*
		 * We need to call hugetlb_fault for both hugepages under
		 * migration (in which case hugetlb_fault waits for the
		 * migration) and hwpoisoned hugepages (in which case we need
		 * to prevent the caller from accessing them). To do this, we
		 * use is_swap_pte here instead of is_hugetlb_entry_migration
		 * and is_hugetlb_entry_hwpoisoned: it simply covers both
		 * cases, and we can't follow correct pages directly from any
		 * kind of swap entries anyway.
		 */
		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
		    ((flags & FOLL_WRITE) &&
		      !huge_pte_write(huge_ptep_get(pte)))) {
			vm_fault_t ret;
			unsigned int fault_flags = 0;

			if (pte)
				spin_unlock(ptl);
			if (flags & FOLL_WRITE)
				fault_flags |= FAULT_FLAG_WRITE;
			if (locked)
				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
					FAULT_FLAG_KILLABLE;
			if (flags & FOLL_NOWAIT)
				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
					FAULT_FLAG_RETRY_NOWAIT;
			if (flags & FOLL_TRIED) {
				/*
				 * Note: FAULT_FLAG_ALLOW_RETRY and
				 * FAULT_FLAG_TRIED can co-exist
				 */
				fault_flags |= FAULT_FLAG_TRIED;
			}
			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
			if (ret & VM_FAULT_ERROR) {
				err = vm_fault_to_errno(ret, flags);
				remainder = 0;
				break;
			}
			if (ret & VM_FAULT_RETRY) {
				if (locked &&
				    !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
					*locked = 0;
				*nr_pages = 0;
				/*
				 * VM_FAULT_RETRY must not return an
				 * error, it will return zero
				 * instead.
				 *
				 * No need to update "position" as the
				 * caller will not check it after
				 * *nr_pages is set to 0.
				 */
				return i;
			}
			continue;
		}

		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
		page = pte_page(huge_ptep_get(pte));

		/*
		 * If subpage information is not requested, update counters
		 * and skip the same_page loop below.
		 */
		if (!pages && !vmas && !pfn_offset &&
		    (vaddr + huge_page_size(h) < vma->vm_end) &&
		    (remainder >= pages_per_huge_page(h))) {
			vaddr += huge_page_size(h);
			remainder -= pages_per_huge_page(h);
			i += pages_per_huge_page(h);
			spin_unlock(ptl);
			continue;
		}

		refs = min3(pages_per_huge_page(h) - pfn_offset,
			    (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);

		if (pages || vmas)
			record_subpages_vmas(mem_map_offset(page, pfn_offset),
					     vma, refs,
					     likely(pages) ? pages + i : NULL,
					     vmas ? vmas + i : NULL);

		if (pages) {
			/*
			 * try_grab_compound_head() should always succeed here,
			 * because: a) we hold the ptl lock, and b) we've just
			 * checked that the huge page is present in the page
			 * tables. If the huge page is present, then the tail
			 * pages must also be present. The ptl prevents the
			 * head page and tail pages from being rearranged in
			 * any way. So this page must be available at this
			 * point, unless the page refcount overflowed:
			 */
			if (WARN_ON_ONCE(!try_grab_compound_head(pages[i],
								 refs,
								 flags))) {
				spin_unlock(ptl);
				remainder = 0;
				err = -ENOMEM;
				break;
			}
		}

		vaddr += (refs << PAGE_SHIFT);
		remainder -= refs;
		i += refs;

		spin_unlock(ptl);
	}
	*nr_pages = remainder;
	/*
	 * setting position is actually required only if remainder is
	 * not zero but it's faster not to add a "if (remainder)"
	 * branch.
	 */
	*position = vaddr;

	return i ? i : err;
}
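
/*
 * Illustrative sketch (not part of this file): the main caller of
 * follow_hugetlb_page() is the GUP slow path, which dispatches here for
 * hugetlb VMAs and lets this routine consume as much of the request as
 * falls inside the VMA.  Roughly, a caller in the spirit of
 * __get_user_pages() would do:
 *
 *	if (is_vm_hugetlb_page(vma)) {
 *		i = follow_hugetlb_page(mm, vma, pages, vmas,
 *					&start, &nr_pages, i,
 *					gup_flags, locked);
 *		continue;	// move on to the next VMA
 *	}
 *
 * On return, *position and *nr_pages have been advanced past whatever
 * was pinned, so the caller can simply resume its loop.
 */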

unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);
	unsigned long pages = 0;
	bool shared_pmd = false;
	struct mmu_notifier_range range;

	/*
	 * In the case of shared PMDs, the area to flush could be beyond
	 * start/end.  Set range.start/range.end to cover the maximum possible
	 * range if PMD sharing is possible.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
				0, vma, mm, start, end);
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);

	BUG_ON(address >= end);
	flush_cache_range(vma, range.start, range.end);

	mmu_notifier_invalidate_range_start(&range);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (; address < end; address += huge_page_size(h)) {
		spinlock_t *ptl;
		ptep = huge_pte_offset(mm, address, huge_page_size(h));
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
			pages++;
			spin_unlock(ptl);
			shared_pmd = true;
			continue;
		}
		pte = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
			spin_unlock(ptl);
			continue;
		}
		if (unlikely(is_hugetlb_entry_migration(pte))) {
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;

				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				set_huge_swap_pte_at(mm, address, ptep,
						     newpte, huge_page_size(h));
				pages++;
			}
			spin_unlock(ptl);
			continue;
		}
		if (!huge_pte_none(pte)) {
			pte_t old_pte;

			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
			pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
			pte = arch_make_huge_pte(pte, vma, NULL, 0);
			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
			pages++;
		}
		spin_unlock(ptl);
	}
	/*
	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
	 * may have cleared our pud entry and done put_page on the page table:
	 * once we release i_mmap_rwsem, another task can do the final put_page
	 * and that page table be reused and filled with junk.  If we actually
	 * did unshare a page of pmds, flush the range corresponding to the pud.
	 */
	if (shared_pmd)
		flush_hugetlb_tlb_range(vma, range.start, range.end);
	else
		flush_hugetlb_tlb_range(vma, start, end);
	/*
	 * No need to call mmu_notifier_invalidate_range(): we are downgrading
	 * page table protection, not changing it to point to a new page.
	 *
	 * See Documentation/vm/mmu_notifier.rst
	 */
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	mmu_notifier_invalidate_range_end(&range);

	return pages << h->order;
}
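
/*
 * Illustrative sketch (not part of this file): mprotect() reaches this
 * function through change_protection(), which picks the hugetlb walker
 * for hugetlb VMAs.  Something along these lines:
 *
 *	if (is_vm_hugetlb_page(vma))
 *		pages = hugetlb_change_protection(vma, start, end, newprot);
 *	else
 *		pages = change_protection_range(vma, start, end, newprot,
 *						cp_flags);
 *
 * Note the return value is in base-page units (pages << h->order), so it
 * is directly comparable with the non-hugetlb path.
 */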

/* Return true if reservation was successful, false otherwise.  */
bool hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
	long chg, add = -1;
	struct hstate *h = hstate_inode(inode);
	struct hugepage_subpool *spool = subpool_inode(inode);
	struct resv_map *resv_map;
	struct hugetlb_cgroup *h_cg = NULL;
	long gbl_reserve, regions_needed = 0;

	/* This should never happen */
	if (from > to) {
		VM_WARN(1, "%s called with a negative range\n", __func__);
		return false;
	}

	/*
	 * Only apply hugepage reservation if asked. At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * without using reserves
	 */
	if (vm_flags & VM_NORESERVE)
		return true;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only as mprotect() may be
	 * called to make the mapping read-write. Assume !vma is a shm mapping
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		/*
		 * resv_map can not be NULL as hugetlb_reserve_pages is only
		 * called for inodes for which resv_maps were created (see
		 * hugetlbfs_get_inode).
		 */
		resv_map = inode_resv_map(inode);

		chg = region_chg(resv_map, from, to, &regions_needed);

	} else {
		/* Private mapping. */
		resv_map = resv_map_alloc();
		if (!resv_map)
			return false;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0)
		goto out_err;

	if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
				chg * pages_per_huge_page(h), &h_cg) < 0)
		goto out_err;

	if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
		/* For private mappings, the hugetlb_cgroup uncharge info hangs
		 * off the resv_map.
		 */
		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
	}

	/*
	 * There must be enough pages in the subpool for the mapping. If
	 * the subpool has a minimum size, there may be some global
	 * reservations already in place (gbl_reserve).
	 */
	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
	if (gbl_reserve < 0)
		goto out_uncharge_cgroup;

	/*
	 * Check enough hugepages are available for the reservation.
	 * Hand the pages back to the subpool if there are not enough.
	 */
	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
		goto out_put_pages;

	/*
	 * Account for the reservations made. Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed. Private mappings are per-VMA and
	 * only the consumed reservations are tracked. When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map. Hence, nothing
	 * else has to be done for private mappings here
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		add = region_add(resv_map, from, to, regions_needed, h, h_cg);

		if (unlikely(add < 0)) {
			hugetlb_acct_memory(h, -gbl_reserve);
			goto out_put_pages;
		} else if (unlikely(chg > add)) {
			/*
			 * pages in this range were added to the reserve
			 * map between region_chg and region_add.  This
			 * indicates a race with alloc_huge_page.  Adjust
			 * the subpool and reserve counts modified above
			 * based on the difference.
			 */
			long rsv_adjust;

			/*
			 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
			 * reference to h_cg->css. See comment below for detail.
			 */
			hugetlb_cgroup_uncharge_cgroup_rsvd(
				hstate_index(h),
				(chg - add) * pages_per_huge_page(h), h_cg);

			rsv_adjust = hugepage_subpool_put_pages(spool,
								chg - add);
			hugetlb_acct_memory(h, -rsv_adjust);
		} else if (h_cg) {
			/*
			 * The file_regions will hold their own reference to
			 * h_cg->css. So we should release the reference held
			 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
			 * done.
			 */
			hugetlb_cgroup_put_rsvd_cgroup(h_cg);
		}
	}
	return true;

out_put_pages:
	/* put back original number of pages, chg */
	(void)hugepage_subpool_put_pages(spool, chg);
out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
					    chg * pages_per_huge_page(h), h_cg);
out_err:
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		/* Only call region_abort if the region_chg succeeded but the
		 * region_add failed or didn't run.
		 */
		if (chg >= 0 && add < 0)
			region_abort(resv_map, from, to, regions_needed);
	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		kref_put(&resv_map->refs, resv_map_release);
	return false;
}
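
/*
 * Illustrative sketch (not part of this file): hugetlbfs calls this at
 * mmap()/setup time to reserve pages up front.  A caller shaped like
 * hugetlbfs_file_mmap() would convert byte offsets to huge-page indices
 * and bail out if the reservation fails:
 *
 *	if (!hugetlb_reserve_pages(inode,
 *				   vma->vm_pgoff >> huge_page_order(h),
 *				   len >> huge_page_shift(h), vma,
 *				   vma->vm_flags))
 *		goto out;
 *
 * vm_pgoff is in base-page units, hence the extra shift by the huge
 * page order to get a huge-page index.
 */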

long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
								long freed)
{
	struct hstate *h = hstate_inode(inode);
	struct resv_map *resv_map = inode_resv_map(inode);
	long chg = 0;
	struct hugepage_subpool *spool = subpool_inode(inode);
	long gbl_reserve;

	/*
	 * Since this routine can be called in the evict inode path for all
	 * hugetlbfs inodes, resv_map could be NULL.
	 */
	if (resv_map) {
		chg = region_del(resv_map, start, end);
		/*
		 * region_del() can fail in the rare case where a region
		 * must be split and another region descriptor can not be
		 * allocated.  If end == LONG_MAX, it will not fail.
		 */
		if (chg < 0)
			return chg;
	}

	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	/*
	 * If the subpool has a minimum size, the number of global
	 * reservations to be released may be adjusted.
	 *
	 * Note that !resv_map implies freed == 0. So (chg - freed)
	 * won't go negative.
	 */
	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
	hugetlb_acct_memory(h, -gbl_reserve);

	return 0;
}
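
/*
 * Illustrative sketch (not part of this file): the truncate/evict path
 * in hugetlbfs removes pages and then drops the now-unused reservations,
 * roughly:
 *
 *	// freed = number of huge pages actually removed
 *	hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
 *
 * Passing end == LONG_MAX guarantees region_del() cannot fail, which is
 * why the eviction path never has to handle an error here.
 */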

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;

	/*
	 * match the virtual addresses, permission and the alignment of the
	 * page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    !range_in_vma(svma, sbase, s_end))
		return 0;

	return saddr;
}

static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
		return true;
	return false;
}

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_USERFAULTFD
	if (uffd_disable_huge_pmd_share(vma))
		return false;
#endif
	return vma_shareable(vma, addr);
}

/*
 * Determine if start,end range within vma could be mapped by shared pmd.
 * If yes, adjust start and end to cover range associated with possible
 * shared pmd mappings.
 */
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);

	/*
	 * vma needs to span at least one aligned PUD size, and the range
	 * must be at least partially within it.
	 */
	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
		(*end <= v_start) || (*start >= v_end))
		return;

	/* Extend the range to be PUD aligned for a worst case scenario */
	if (*start > v_start)
		*start = ALIGN_DOWN(*start, PUD_SIZE);

	if (*end < v_end)
		*end = ALIGN(*end, PUD_SIZE);
}
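
/*
 * Worked example (illustrative): assume PUD_SIZE is 1GB and a shared
 * mapping spans [1GB, 4GB), so v_start = 1GB and v_end = 4GB.  A flush
 * request for [2GB + 4MB, 2GB + 8MB) lies inside a PUD that might be
 * shared, so the range is widened to the enclosing PUD window:
 *
 *	*start = ALIGN_DOWN(2GB + 4MB, 1GB) = 2GB
 *	*end   = ALIGN(2GB + 8MB, 1GB)      = 3GB
 *
 * i.e. the caller flushes the whole [2GB, 3GB) window, covering every
 * mapping that could be using the shared PMD page.
 */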

/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner.
 *
 * This routine must be called with i_mmap_rwsem held in at least read mode if
 * sharing is possible.  For hugetlbfs, this prevents removal of any page
 * table entries associated with the address space.  This is important as we
 * are setting up sharing based on existing page table entries (mappings).
 *
 * NOTE: This routine is only called from huge_pte_alloc.  Some callers of
 * huge_pte_alloc know that sharing is not possible and do not take
 * i_mmap_rwsem as a performance optimization.  This is handled by the
 * if !vma_shareable check at the beginning of the routine. i_mmap_rwsem is
 * only required for subsequent processing.
 */
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;
	pte_t *pte;
	spinlock_t *ptl;

	i_mmap_assert_locked(mapping);
	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr,
					       vma_mmu_pagesize(svma));
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
	if (pud_none(*pud)) {
		pud_populate(mm, pud,
				(pmd_t *)((unsigned long)spte & PAGE_MASK));
		mm_inc_nr_pmds(mm);
	} else {
		put_page(virt_to_page(spte));
	}
	spin_unlock(ptl);
out:
	pte = (pte_t *)pmd_alloc(mm, pud, addr);
	return pte;
}
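
/*
 * Illustrative sketch (not part of this file): if two processes map the
 * same hugetlbfs file with suitably aligned, PUD-spanning ranges, the
 * second one ends up populating its pud with the first process's PMD
 * page rather than allocating a new one:
 *
 *	pud (process A) ----\
 *	                     +--> shared PMD page (page_count == 2)
 *	pud (process B) ----/
 *
 * The get_page(virt_to_page(spte)) above is what records process B's use
 * of the PMD page; huge_pmd_unshare() later drops that reference.
 */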

/*
 * unmap huge page backed by shared pte.
 *
 * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared,
 * indicated by page_count > 1, unmap is achieved by clearing pud and
 * decrementing the ref count. If count == 1, the pte page is not shared.
 *
 * Called with page table lock held and i_mmap_rwsem held in write mode.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	p4d_t *p4d = p4d_offset(pgd, *addr);
	pud_t *pud = pud_offset(p4d, *addr);

	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	mm_dec_nr_pmds(mm);
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}
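
/*
 * Worked example (illustrative): the final *addr adjustment exists so
 * that callers iterating with "for (; addr < end; addr += sz)" skip the
 * remainder of the PUD range they just unshared.  With 2MB huge pages
 * and HPAGE_SIZE * PTRS_PER_PTE == 1GB:
 *
 *	*addr = 0x40200000;                     // inside [1GB, 2GB)
 *	*addr = ALIGN(*addr, 1GB) - HPAGE_SIZE; // 0x80000000 - 2MB
 *
 * so the caller's "addr += huge_page_size(h)" lands exactly on the 2GB
 * boundary, the start of the next PUD range.
 */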

#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
{
	return NULL;
}

int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep)
{
	return 0;
}

void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (want_pmd_share(vma, addr) && pud_none(*pud))
				pte = huge_pmd_share(mm, vma, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}
	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));

	return pte;
}
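
/*
 * Illustrative sketch (not part of this file): the fault path allocates
 * the hugepage table entry with this helper before doing anything else,
 * in the spirit of hugetlb_fault():
 *
 *	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
 *	if (!ptep)
 *		return VM_FAULT_OOM;
 *
 * where haddr is the faulting address rounded down with
 * huge_page_mask(h).
 */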

/*
 * huge_pte_offset() - Walk the page table to resolve the hugepage
 * entry at address @addr
 *
 * Return: Pointer to page table entry (PUD or PMD) for
 * address @addr, or NULL if a !p*d_present() entry is encountered and the
 * size @sz doesn't match the hugepage size at this level of the page
 * table.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (sz == PUD_SIZE)
		/* must be pud huge, non-present or none */
		return (pte_t *)pud;
	if (!pud_present(*pud))
		return NULL;
	/* must have a valid entry and size to go further */

	pmd = pmd_offset(pud, addr);
	/* must be pmd huge, non-present or none */
	return (pte_t *)pmd;
}
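
/*
 * Illustrative sketch (not part of this file): a typical lookup pairs
 * this walk with the hugepage pte lock, as the walkers in this file do:
 *
 *	ptep = huge_pte_offset(mm, address, huge_page_size(h));
 *	if (!ptep)
 *		return;			// nothing mapped at this level
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	pte = huge_ptep_get(ptep);
 *	...
 *	spin_unlock(ptl);
 *
 * The caller must still check the returned entry: a non-NULL ptep can
 * point at a none, migration or hwpoison entry.
 */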

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */

/*
 * These functions are overridable if your architecture needs its own
 * behavior.
 */
struct page * __weak
follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

struct page * __weak
follow_huge_pd(struct vm_area_struct *vma,
	       unsigned long address, hugepd_t hpd, int flags, int pdshift)
{
	WARN(1, "hugepd follow called with no support for hugepage directory format\n");
	return NULL;
}

struct page * __weak
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int flags)
{
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t pte;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return NULL;

retry:
	ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	/*
	 * make sure that the address range covered by this pmd is not
	 * unmapped by other threads.
	 */
	if (!pmd_huge(*pmd))
		goto out;
	pte = huge_ptep_get((pte_t *)pmd);
	if (pte_present(pte)) {
		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
		/*
		 * try_grab_page() should always succeed here, because: a) we
		 * hold the pmd (ptl) lock, and b) we've just checked that the
		 * huge pmd (head) page is present in the page tables. The ptl
		 * prevents the head page and tail pages from being rearranged
		 * in any way. So this page must be available at this point,
		 * unless the page refcount overflowed:
		 */
		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
			page = NULL;
			goto out;
		}
	} else {
		if (is_hugetlb_entry_migration(pte)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
			goto retry;
		}
		/*
		 * hwpoisoned entry is treated as no_page_table in
		 * follow_page_mask().
		 */
	}
out:
	spin_unlock(ptl);
	return page;
}
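
/*
 * Illustrative sketch (not part of this file): follow_page_mask() is the
 * expected caller; it hands a huge pmd straight to this helper and turns
 * a NULL result into the usual no-page outcome:
 *
 *	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
 *		page = follow_huge_pmd(mm, address, pmd, flags);
 *		if (page)
 *			return page;
 *		return no_page_table(vma, flags);
 *	}
 *
 * A NULL return therefore covers the none, hwpoisoned and
 * refcount-overflow cases alike.
 */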

struct page * __weak
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int flags)
{
	if (flags & (FOLL_GET | FOLL_PIN))
		return NULL;

	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}

struct page * __weak
follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
{
	if (flags & (FOLL_GET | FOLL_PIN))
		return NULL;

	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
}

bool isolate_huge_page(struct page *page, struct list_head *list)
{
	bool ret = true;

	spin_lock_irq(&hugetlb_lock);
	if (!PageHeadHuge(page) ||
	    !HPageMigratable(page) ||
	    !get_page_unless_zero(page)) {
		ret = false;
		goto unlock;
	}
	ClearHPageMigratable(page);
	list_move_tail(&page->lru, list);
unlock:
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

void putback_active_hugepage(struct page *page)
{
	spin_lock_irq(&hugetlb_lock);
	SetHPageMigratable(page);
	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	put_page(page);
}
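
/*
 * Illustrative sketch (not part of this file): migration callers pair
 * these two helpers around migrate_pages().  Roughly:
 *
 *	LIST_HEAD(pagelist);
 *
 *	if (PageHuge(page))
 *		isolate_huge_page(compound_head(page), &pagelist);
 *	...
 *	// new_page below is a caller-supplied allocation callback
 *	if (migrate_pages(&pagelist, new_page, NULL, 0,
 *			  MIGRATE_SYNC, MR_MEMORY_HOTPLUG))
 *		putback_movable_pages(&pagelist);
 *
 * putback_movable_pages() routes hugetlb pages back through
 * putback_active_hugepage(), which re-sets HPageMigratable.  Note that
 * isolate_huge_page() takes a reference and clears HPageMigratable, so
 * a page cannot be isolated twice.
 */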

void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
{
	struct hstate *h = page_hstate(oldpage);

	hugetlb_cgroup_migrate(oldpage, newpage);
	set_page_owner_migrate_reason(newpage, reason);

	/*
	 * transfer temporary state of the new huge page. This is
	 * the reverse of other transitions because the newpage is going to
	 * be final while the old one will be freed so it takes over
	 * the temporary status.
	 *
	 * Also note that we have to transfer the per-node surplus state
	 * here as well otherwise the global surplus count will not match
	 * the per-node counts.
	 */
	if (HPageTemporary(newpage)) {
		int old_nid = page_to_nid(oldpage);
		int new_nid = page_to_nid(newpage);

		SetHPageTemporary(oldpage);
		ClearHPageTemporary(newpage);

		/*
		 * There is no need to transfer the per-node surplus state
		 * when we do not cross the node.
		 */
		if (new_nid == old_nid)
			return;
		spin_lock_irq(&hugetlb_lock);
		if (h->surplus_huge_pages_node[old_nid]) {
			h->surplus_huge_pages_node[old_nid]--;
			h->surplus_huge_pages_node[new_nid]++;
		}
		spin_unlock_irq(&hugetlb_lock);
	}
}

/*
 * This function will unconditionally remove all the shared pmd pgtable entries
 * within the specific vma for a hugetlbfs memory range.
 */
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	unsigned long address, start, end;
	spinlock_t *ptl;
	pte_t *ptep;

	if (!(vma->vm_flags & VM_MAYSHARE))
		return;

	start = ALIGN(vma->vm_start, PUD_SIZE);
	end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);

	if (start >= end)
		return;

	/*
	 * No need to call adjust_range_if_pmd_sharing_possible(), because
	 * we have already done the PUD_SIZE alignment.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				start, end);
	mmu_notifier_invalidate_range_start(&range);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (address = start; address < end; address += PUD_SIZE) {
		unsigned long tmp = address;

		ptep = huge_pte_offset(mm, address, sz);
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		/* We don't want 'address' to be changed */
		huge_pmd_unshare(mm, vma, &tmp, ptep);
		spin_unlock(ptl);
	}
	flush_hugetlb_tlb_range(vma, start, end);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	/*
	 * No need to call mmu_notifier_invalidate_range(), see
	 * Documentation/vm/mmu_notifier.rst.
	 */
	mmu_notifier_invalidate_range_end(&range);
}

#ifdef CONFIG_CMA
static bool cma_reserve_called __initdata;

static int __init cmdline_parse_hugetlb_cma(char *p)
{
	hugetlb_cma_size = memparse(p, &p);
	return 0;
}

early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);

void __init hugetlb_cma_reserve(int order)
{
	unsigned long size, reserved, per_node;
	int nid;

	cma_reserve_called = true;

	if (!hugetlb_cma_size)
		return;

	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
			(PAGE_SIZE << order) / SZ_1M);
		return;
	}

	/*
	 * If 3 GB area is requested on a machine with 4 numa nodes,
	 * let's allocate 1 GB on first three nodes and ignore the last one.
	 */
	per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
	pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
		hugetlb_cma_size / SZ_1M, per_node / SZ_1M);

	reserved = 0;
	for_each_node_state(nid, N_ONLINE) {
		int res;
		char name[CMA_MAX_NAME];

		size = min(per_node, hugetlb_cma_size - reserved);
		size = round_up(size, PAGE_SIZE << order);

		snprintf(name, sizeof(name), "hugetlb%d", nid);
		res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << order,
						 0, false, name,
						 &hugetlb_cma[nid], nid);
		if (res) {
			pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
				res, nid);
			continue;
		}

		reserved += size;
		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
			size / SZ_1M, nid);

		if (reserved >= hugetlb_cma_size)
			break;
	}
}
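
/*
 * Worked example (illustrative): with hugetlb_cma=3G on a 4-node machine
 * and 1GB gigantic pages (PAGE_SIZE << order == 1GB):
 *
 *	per_node = DIV_ROUND_UP(3GB, 4) = 768MB
 *	node 0: size = round_up(min(768MB, 3GB - 0GB), 1GB) = 1GB
 *	node 1: size = round_up(min(768MB, 3GB - 1GB), 1GB) = 1GB
 *	node 2: size = round_up(min(768MB, 3GB - 2GB), 1GB) = 1GB
 *
 * After node 2, reserved == 3GB >= hugetlb_cma_size, so the loop stops
 * and node 3 gets nothing, which is exactly the "1 GB on first three
 * nodes" behavior the comment above describes.
 */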

void __init hugetlb_cma_check(void)
{
	if (!hugetlb_cma_size || cma_reserve_called)
		return;

	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
}

#endif /* CONFIG_CMA */