hugetlb.c
/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
static unsigned long surplus_huge_pages;
static unsigned long nr_overcommit_huge_pages;
unsigned long max_huge_pages;
unsigned long sysctl_overcommit_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
static int hugetlb_next_nid;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

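/*
 * Clear or copy a huge page one base page at a time, with a
 * cond_resched() between base pages so that huge operations do not
 * hog the CPU.
 */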
static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

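/*
 * Take a free huge page off the per-node free lists, walking the
 * zonelist for the faulting address so the page comes from a node
 * allowed by the VMA's mempolicy and cpuset.  Called with
 * hugetlb_lock held.
 */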
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (vma && vma->vm_flags & VM_MAYSHARE)
				resv_huge_pages--;
			break;
		}
	}
	mpol_free(mpol);	/* unref if mpol !NULL */
	return page;
}

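/*
 * Hand a huge page back to the buddy allocator: drop the global and
 * per-node counts, clear the leftover page flags and the compound
 * destructor, and free the HUGETLB_PAGE_ORDER block.
 */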
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1<< PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

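/*
 * Compound page destructor, run when the last reference to a huge
 * page is dropped: surplus pages go straight back to the buddy
 * allocator, everything else returns to the free lists, and any
 * quota charged to the owning mapping is released.
 */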
static void free_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	struct address_space *mapping;

	mapping = (struct address_space *) page_private(page);
	set_page_private(page, 0);
	BUG_ON(page_count(page));
	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages_node[nid]) {
		update_and_free_page(page);
		surplus_huge_pages--;
		surplus_huge_pages_node[nid]--;
	} else {
		enqueue_huge_page(page);
	}
	spin_unlock(&hugetlb_lock);
	if (mapping)
		hugetlb_put_quota(mapping, 1);
}

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(int delta)
{
	static int prev_nid;
	int nid = prev_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);
	do {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		/* To shrink on this node, there must be a surplus page */
		if (delta < 0 && !surplus_huge_pages_node[nid])
			continue;
		/* Surplus cannot exceed the total number of pages */
		if (delta > 0 && surplus_huge_pages_node[nid] >=
						nr_huge_pages_node[nid])
			continue;

		surplus_huge_pages += delta;
		surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (nid != prev_nid);

	prev_nid = nid;
	return ret;
}

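/*
 * Allocate a fresh huge page from the buddy allocator on the given
 * node and hand it to the hugetlb pool via its compound destructor.
 */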
static struct page *alloc_fresh_huge_page_node(int nid)
{
	struct page *page;

	page = alloc_pages_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
		HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[nid]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
	}

	return page;
}

static int alloc_fresh_huge_page(void)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hugetlb_next_nid;

	do {
		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
		if (page)
			ret = 1;
		/*
		 * Use a helper variable to find the next node and then
		 * copy it back to hugetlb_next_nid afterwards:
		 * otherwise there's a window in which a racer might
		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
		 * But we don't need to use a spin_lock here: it really
		 * doesn't matter if occasionally a racer chooses the
		 * same nid as we do.  Move nid forward in the mask even
		 * if we just successfully allocated a hugepage so that
		 * the next caller gets hugepages on the next node.
		 */
		next_nid = next_node(hugetlb_next_nid, node_online_map);
		if (next_nid == MAX_NUMNODES)
			next_nid = first_node(node_online_map);
		hugetlb_next_nid = next_nid;
	} while (!page && hugetlb_next_nid != start_nid);

	return ret;
}

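/*
 * Allocate a surplus huge page straight from the buddy allocator,
 * subject to the nr_overcommit_huge_pages limit.  Used when the
 * static pool cannot satisfy a request.
 */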
static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
						unsigned long address)
{
	struct page *page;
	unsigned int nid;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (surplus_huge_pages >= nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		nr_huge_pages++;
		surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);

	spin_lock(&hugetlb_lock);
	if (page) {
		nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		/*
		 * We incremented the global counters already
		 */
		nr_huge_pages_node[nid]++;
		surplus_huge_pages_node[nid]++;
	} else {
		nr_huge_pages--;
		surplus_huge_pages--;
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;

	needed = (resv_huge_pages + delta) - free_huge_pages;
	if (needed <= 0)
		return 0;

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(NULL, 0);
		if (!page) {
			/*
			 * We were not able to allocate enough pages to
			 * satisfy the entire reservation so we free what
			 * we've allocated so far.
			 */
			spin_lock(&hugetlb_lock);
			needed = 0;
			goto free;
		}

		list_add(&page->lru, &surplus_list);
	}
	allocated += needed;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
	if (needed > 0)
		goto retry;

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.
	 */
	needed += allocated;
	ret = 0;
free:
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		list_del(&page->lru);
		if ((--needed) >= 0)
			enqueue_huge_page(page);
		else {
			/*
			 * Decrement the refcount and free the page using its
			 * destructor.  This must be done with hugetlb_lock
			 * unlocked which is safe because free_huge_page takes
			 * hugetlb_lock before deciding how to free the page.
			 */
			spin_unlock(&hugetlb_lock);
			put_page(page);
			spin_lock(&hugetlb_lock);
		}
	}

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 */
static void return_unused_surplus_pages(unsigned long unused_resv_pages)
{
	static int nid = -1;
	struct page *page;
	unsigned long nr_pages;

	nr_pages = min(unused_resv_pages, surplus_huge_pages);

	while (nr_pages) {
		nid = next_node(nid, node_online_map);
		if (nid == MAX_NUMNODES)
			nid = first_node(node_online_map);

		if (!surplus_huge_pages_node[nid])
			continue;

		if (!list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			surplus_huge_pages--;
			surplus_huge_pages_node[nid]--;
			nr_pages--;
		}
	}
}


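/*
 * alloc_huge_page() and its shared/private helpers hand out a huge
 * page for a fault.  Shared (VM_MAYSHARE) mappings were reserved and
 * charged quota when the file was mapped, so they only dequeue from
 * the pool; private mappings charge quota here and may fall back to
 * a surplus page from the buddy allocator.
 */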
static struct page *alloc_huge_page_shared(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page(vma, addr);
	spin_unlock(&hugetlb_lock);
	return page ? page : ERR_PTR(-VM_FAULT_OOM);
}

static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct page *page = NULL;

	if (hugetlb_get_quota(vma->vm_file->f_mapping, 1))
		return ERR_PTR(-VM_FAULT_SIGBUS);

	spin_lock(&hugetlb_lock);
	if (free_huge_pages > resv_huge_pages)
		page = dequeue_huge_page(vma, addr);
	spin_unlock(&hugetlb_lock);
	if (!page) {
		page = alloc_buddy_huge_page(vma, addr);
		if (!page) {
			hugetlb_put_quota(vma->vm_file->f_mapping, 1);
			return ERR_PTR(-VM_FAULT_OOM);
		}
	}
	return page;
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;
	struct address_space *mapping = vma->vm_file->f_mapping;

	if (vma->vm_flags & VM_MAYSHARE)
		page = alloc_huge_page_shared(vma, addr);
	else
		page = alloc_huge_page_private(vma, addr);

	if (!IS_ERR(page)) {
		set_page_refcounted(page);
		set_page_private(page, (unsigned long) mapping);
	}
	return page;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	hugetlb_next_nid = first_node(node_online_map);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (count >= nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
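/*
 * Grow or shrink the persistent huge page pool to 'count' pages,
 * converting surplus pages where possible and allocating or freeing
 * fresh huge pages for the remainder.  Returns the resulting pool
 * size.
 */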
static unsigned long set_max_huge_pages(unsigned long count)
{
	unsigned long min_count, ret;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (surplus_huge_pages && count > persistent_huge_pages) {
		if (!adjust_pool_surplus(-1))
			break;
	}

	while (count > persistent_huge_pages) {
		int ret;
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page();
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(min_count);
	while (min_count < persistent_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	while (count < persistent_huge_pages) {
		if (!adjust_pool_surplus(1))
			break;
	}
out:
	ret = persistent_huge_pages;
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	spin_lock(&hugetlb_lock);
	nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
	spin_unlock(&hugetlb_lock);
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"HugePages_Surp:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			surplus_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

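/*
 * Build the huge pte for a newly instantiated page, honouring the
 * VMA's protection bits and the requested writability.
 */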
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}


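/*
 * Duplicate the huge ptes of a VMA at fork time.  For private
 * (copy-on-write) mappings the parent's ptes are write-protected so
 * that both processes fault and copy on their next write.
 */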
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

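/*
 * Tear down the huge ptes in [start, end), gathering the pages on a
 * local list and dropping their references only after the TLB has
 * been flushed.  The caller must hold the file's i_mmap_lock.
 */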
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

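/*
 * Locked wrapper around __unmap_hugepage_range(): takes the file's
 * i_mmap_lock when the VMA still has a mapping attached.
 */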
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for valid hugetlb area. However, vm_file will be NULL in the error
	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
	 * to clean up. Since no pte has actually been setup, it is safe to
	 * do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

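/*
 * Break copy-on-write for a huge page: unless we are the only user
 * of the old page and can simply make it writable, allocate a new
 * huge page, copy the old contents into it and swap it into the pte.
 * Called with mm->page_table_lock held; the lock is dropped around
 * the copy.
 */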
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (IS_ERR(new_page)) {
		page_cache_release(old_page);
		return -PTR_ERR(new_page);
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

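/*
 * Handle a fault on a huge pte that is still empty: find or create
 * the page in the hugetlbfs page cache (or a fresh private copy),
 * guard against a racing truncate, and install the new pte.
 */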
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address);
		if (IS_ERR(page)) {
			ret = -PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, address);
		__SetPageUptodate(page);

		if (vma->vm_flags & VM_SHARED) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += BLOCKS_PER_HUGEPAGE;
			spin_unlock(&inode->i_lock);
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
	put_page(page);
	goto out;
}

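/*
 * Top-level hugetlb fault handler.  hugetlb_instantiation_mutex
 * serialises allocation so racing faults on the same page do not
 * each consume a huge page; write faults on read-only ptes are
 * passed on to hugetlb_cow().
 */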
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

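/*
 * get_user_pages() back end for hugetlb mappings: walk the huge ptes
 * covering the requested range, faulting pages in as needed, and
 * fill the pages[]/vmas[] arrays one base page at a time.
 */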
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i,
			int write)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make * sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, write);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

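/*
 * mprotect() support for hugetlb VMAs: rewrite every huge pte in the
 * range with the new protection and flush the TLB.
 */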
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

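/*
 * Reservations for shared mappings are tracked as a list of
 * [from, to) file regions hung off the mapping's private_list.
 * region_chg() reports how many new huge pages a reservation would
 * need, region_add() commits it, and region_truncate() drops
 * reservations beyond a truncation point.
 */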
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to   = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

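/*
 * Charge or release 'delta' huge pages worth of reservation against
 * the pool, growing the surplus pool when needed and returning
 * unused surplus pages when a reservation is dropped.
 */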
static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. Application can still potentially OOM'ed by kernel
	 * with lack of free htlb page in cpuset that the task is in.
	 * Attempt to enforce strict accounting with cpuset is almost
	 * impossible (or too ugly) because cpuset is too fluid that
	 * task or memory node can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to check against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(free_huge_pages_node))
			goto out;
	}

	ret = 0;
	resv_huge_pages += delta;
	if (delta < 0)
		return_unused_surplus_pages((unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

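/*
 * Reserve huge pages (and hugetlbfs quota) for the file range
 * [from, to) so that later faults on a shared mapping cannot fail.
 */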
int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;

	if (hugetlb_get_quota(inode->i_mapping, chg))
		return -ENOSPC;
	ret = hugetlb_acct_memory(chg);
	if (ret < 0) {
		hugetlb_put_quota(inode->i_mapping, chg);
		return ret;
	}
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);

	spin_lock(&inode->i_lock);
	inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed;
	spin_unlock(&inode->i_lock);

	hugetlb_put_quota(inode->i_mapping, (chg - freed));
	hugetlb_acct_memory(-(chg - freed));
}