/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

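/*
 * Put a free huge page back on its node's free list; the caller must
 * hold hugetlb_lock.
 */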
static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

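/*
 * Take a free huge page from the first node in this VMA's mempolicy
 * zonelist that has free huge pages and is allowed by the current
 * cpuset.
 */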
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid;
	struct page *page = NULL;
	struct zonelist *zonelist = huge_zonelist(vma, address);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, GFP_HIGHUSER) &&
		    !list_empty(&hugepage_freelists[nid]))
			break;
	}

	if (*z) {
		page = list_entry(hugepage_freelists[nid].next,
				  struct page, lru);
		list_del(&page->lru);
		free_huge_pages--;
		free_huge_pages_node[nid]--;
	}
	return page;
}

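/*
 * Compound page destructor, called when the last reference to a huge
 * page is dropped: put the page back on its free list.
 */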
static void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}

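/*
 * Allocate a fresh huge page from the buddy allocator, interleaving
 * allocations across the online nodes, and release it into the
 * hugepage pool.
 */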
static int alloc_fresh_huge_page(void)
{
	static int prev_nid;
	struct page *page;
	static DEFINE_SPINLOCK(nid_lock);
	int nid;

	spin_lock(&nid_lock);
	nid = next_node(prev_nid, node_online_map);
	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	prev_nid = nid;
	spin_unlock(&nid_lock);

	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}

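/*
 * Allocate a huge page for the given VMA, charging shared (VM_MAYSHARE)
 * mappings against their reservation.
 */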
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages--;
	else if (free_huge_pages <= resv_huge_pages)
		goto fail;

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

fail:
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages++;
	spin_unlock(&hugetlb_lock);
	return NULL;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk(KERN_INFO "Total HugeTLB memory allocated, %lu\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

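/* Sum a per-node counter over the nodes allowed by the current cpuset. */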
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
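/*
 * Hand a huge page back to the buddy allocator, clearing the page flags
 * and the compound destructor it picked up while in the pool.
 */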
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1<< PG_writeback);
	}
	page[1].lru.next = NULL;	/* clear compound page destructor */
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

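/*
 * Grow or shrink the hugepage pool towards @count pages, never
 * shrinking it below the number of reserved pages.
 */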
static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		if (!alloc_fresh_huge_page())
			return nr_huge_pages;
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	count = max(count, resv_huge_pages);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}
#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
				unsigned long address, int *unused)
{
	BUG();
	return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.nopage = hugetlb_nopage,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
		update_mmu_cache(vma, address, entry);
		lazy_mmu_prot_update(entry);
	}
}


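/*
 * Copy the huge PTEs of a VMA from @src to @dst at fork time; for
 * private COW mappings both copies end up write-protected.
 */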
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by the per-file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for a valid hugetlb area. However, vm_file will be NULL in the error
	 * cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap method
	 * fails, do_mmap_pgoff() nullifies vma->vm_file before calling this
	 * function to clean up. Since no pte has actually been set up, it is
	 * safe to do nothing in this case.
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

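/*
 * Break copy-on-write: allocate a new huge page, copy the old contents
 * into it and point the faulting PTE at the new page.
 */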
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return VM_FAULT_MINOR;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return VM_FAULT_MINOR;
}

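/*
 * Handle a fault on a huge PTE that is not present: find the page in
 * the page cache, or allocate a fresh huge page (adding it to the page
 * cache for shared mappings).
 */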
int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = VM_FAULT_MINOR;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

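/*
 * Top-level hugepage fault handler: instantiate a missing huge page via
 * hugetlb_no_page() or break COW via hugetlb_cow().
 */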
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = VM_FAULT_MINOR;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

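/*
 * get_user_pages() support for hugepage VMAs: walk the huge PTEs,
 * faulting pages in as necessary, and fill the pages[] and vmas[]
 * arrays.
 */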
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts per
		 * hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (ret == VM_FAULT_MINOR)
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

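/*
 * Apply @newprot to every huge PTE in [address, end), the hugepage
 * analogue of change_protection() for mprotect().
 */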
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
			lazy_mmu_prot_update(pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

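/*
 * Hugepage reservations are tracked as a sorted list of [from, to)
 * file_regions hanging off inode->i_mapping->private_list.
 */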
struct file_region {
	struct list_head link;
	long from;
	long to;
};

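/*
 * Record the reservation [f, t), merging any regions it overlaps.
 * region_chg() must have been called beforehand, so no allocation is
 * needed here.
 */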
static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher, extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

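/*
 * Return how many pages reserving [f, t) would add on top of the
 * existing regions, inserting a zero-size placeholder region if needed
 * so that the following region_add() cannot fail.
 */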
static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to   = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area; if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

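/*
 * Charge @delta huge pages against the pool, failing if too few free
 * huge pages remain to back the reservation.
 */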
static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	if ((delta + resv_huge_pages) <= free_huge_pages) {
		resv_huge_pages += delta;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}

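/*
 * Reserve huge pages for the file range [from, to) and record the
 * reservation in the inode's region list.
 */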
int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation, as the accounting is done on a global variable. Such
	 * a reservation is completely rubbish in the presence of cpusets
	 * because the reservation is not checked against page availability
	 * for the current cpuset. An application can still be OOM'ed by the
	 * kernel if the cpuset it runs in lacks free huge pages. Enforcing
	 * strict accounting with cpusets is almost impossible (or too ugly)
	 * because cpusets are too fluid: tasks and memory nodes can be moved
	 * between cpusets dynamically.
	 *
	 * This change of semantics for shared hugetlb mappings with cpusets
	 * is undesirable. However, in order to preserve some of the
	 * semantics, we fall back to checking the current free page
	 * availability as a best attempt, hopefully minimizing the impact
	 * of the semantic change that cpusets introduce.
	 */
	if (chg > cpuset_mems_nr(free_huge_pages_node))
		return -ENOMEM;

	ret = hugetlb_acct_memory(chg);
	if (ret < 0)
		return ret;
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	hugetlb_acct_memory(freed - chg);
}