/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr + i * PAGE_SIZE);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

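/*
 * Put a free huge page back on its node's free list and bump the global
 * and per-node free counters.  Caller must hold hugetlb_lock.
 */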
static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

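/*
 * Take a free huge page off the free list of a node allowed by the VMA's
 * memory policy and the current cpuset, and update the free counters.
 * Returns NULL if no suitable page is available.  Caller must hold
 * hugetlb_lock.
 */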
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct mempolicy *mpol;
	struct zonelist *zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
		    !list_empty(&hugepage_freelists[nid])) {
			page = list_entry(hugepage_freelists[nid].next,
					  struct page, lru);
			list_del(&page->lru);
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			break;
		}
	}
	mpol_free(mpol);	/* unref if mpol !NULL */
	return page;
}

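/*
 * Release a huge page back to the buddy allocator: drop it from the
 * hugepage counters, clear the page flags left over from its life as a
 * huge page, detach the compound destructor and free the whole order.
 * Called with hugetlb_lock held.
 */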
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

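/*
 * Compound page destructor: called when the last reference to a huge
 * page is dropped.  The page goes back onto the free list rather than to
 * the buddy allocator.
 */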
static void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}

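/*
 * Allocate a brand new huge page from the buddy allocator, spreading
 * allocations across the online nodes in round-robin fashion.  Returns 1
 * on success, 0 on failure.
 */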
static int alloc_fresh_huge_page(void)
{
	static int prev_nid;
	struct page *page;
	int nid;

	/*
	 * Copy static prev_nid to local nid, work on that, then copy it
	 * back to prev_nid afterwards: otherwise there's a window in which
	 * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
	 * But we don't need to use a spin_lock here: it really doesn't
	 * matter if occasionally a racer chooses the same nid as we do.
	 */
	nid = next_node(prev_nid, node_online_map);
	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	prev_nid = nid;

	page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}

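/*
 * Allocate a huge page for the given mapping and fault address.  Shared
 * (VM_MAYSHARE) mappings consume their reservation; private mappings may
 * only dip into pages that are not reserved.
 */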
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages--;
	else if (free_huge_pages <= resv_huge_pages)
		goto fail;

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

fail:
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages++;
	spin_unlock(&hugetlb_lock);
	return NULL;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

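/*
 * Sum a per-node counter array over the nodes in the current task's
 * cpuset.
 */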
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_HIGHMEM
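/*
 * When shrinking the pool, preferentially free huge pages that sit in
 * lowmem, so that scarce lowmem is released first and the remaining pool
 * keeps its highmem pages.
 */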
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		if (!alloc_fresh_huge_page())
			return nr_huge_pages;
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	count = max(count, resv_huge_pages);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			struct file *file, void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
};

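/*
 * Build the huge PTE for a page: dirty and writable for writable
 * mappings, write-protected otherwise, and always young and huge.
 */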
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

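/*
 * Upgrade an existing huge PTE to writable and dirty, and update the MMU
 * state if the entry actually changed.
 */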
static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
	}
}


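/*
 * Copy the hugetlb portion of a VMA from the parent's page tables into
 * the child's at fork time.  For private mappings the source PTEs are
 * write-protected so that both processes will COW on the next write.
 */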
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

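/*
 * Unmap the huge pages in [start, end) from a VMA.  The caller must hold
 * the per-file i_mmap_lock; the pages themselves are only released after
 * the TLB has been flushed.
 */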
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by the per-file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for valid hugetlb area. However, vm_file will be NULL in the error
	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
	 * to clean up. Since no pte has actually been setup, it is safe to
	 * do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

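/*
 * Handle a copy-on-write fault on a huge page: allocate a fresh huge
 * page, copy the old contents into it and install a writable PTE.
 * Called with mm->page_table_lock held; the lock is dropped around the
 * page copy.
 */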
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}

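/*
 * Handle a fault on a not-yet-instantiated huge page: look the page up
 * in (or add it to) the page cache of the backing hugetlbfs file, clear
 * it if freshly allocated, and install the new PTE.
 */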
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

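/*
 * Main hugetlb fault handler.  Missing pages are instantiated by
 * hugetlb_no_page(); write faults on read-only private mappings are
 * resolved by hugetlb_cow().
 */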
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = 0;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

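/*
 * get_user_pages() helper for hugetlb VMAs: walk the request one base
 * page at a time, faulting huge pages in as needed, and fill in the
 * pages[] and vmas[] arrays.
 */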
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

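/*
 * Tracking of file ranges for which huge page reservations exist.
 * Reserved regions are kept on a per-inode list (i_mapping->private_list)
 * of file_region entries; region_chg() reports how many new huge pages a
 * reservation would need, region_add() records it, and region_truncate()
 * trims the list when the file is truncated.
 */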
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle: allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to   = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area; if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	if ((delta + resv_huge_pages) <= free_huge_pages) {
		resv_huge_pages += delta;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}

int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;
	/*
	 * When cpusets are configured, they break the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * a reservation is completely rubbish in the presence of cpusets
	 * because the reservation is not checked against page availability
	 * for the current cpuset. An application can still potentially be
	 * OOM'ed by the kernel for lack of free huge pages in the cpuset
	 * that the task is in. Attempting to enforce strict accounting with
	 * cpusets is almost impossible (or too ugly), because cpusets are so
	 * fluid that tasks or memory nodes can be dynamically moved between
	 * them.
	 *
	 * The change of semantics for shared hugetlb mappings with cpusets
	 * is undesirable. However, in order to preserve some of the
	 * semantics, we fall back to checking against the current free page
	 * availability as a best attempt, hopefully minimizing the impact of
	 * the changed semantics that cpusets introduce.
	 */
	if (chg > cpuset_mems_nr(free_huge_pages_node))
		return -ENOMEM;

	ret = hugetlb_acct_memory(chg);
	if (ret < 0)
		return ret;
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	hugetlb_acct_memory(freed - chg);
}