/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

#include "internal.h"

bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);
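
/*
 * Illustrative userspace sketch (not part of this file): can_do_mlock()
 * is the permission check behind the mlock(2) family.  When
 * RLIMIT_MEMLOCK is 0 and the caller lacks CAP_IPC_LOCK, the syscall
 * fails with EPERM:
 *
 *	if (mlock(buf, len) != 0)
 *		perror("mlock");
 */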

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked. So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 *  LRU accounting for clear_page_mlock()
 */
void clear_page_mlock(struct page *page)
{
	if (!TestClearPageMlocked(page))
		return;

	mod_zone_page_state(page_zone(page), NR_MLOCK,
			    -hpage_nr_pages(page));
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race. The page already moved to the evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	/* Serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	if (!TestSetPageMlocked(page)) {
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/*
 * Isolate a page from LRU with optional get_page() pin.
 * Assumes lru_lock already held and page already pinned.
 */
static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
{
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
		if (getpage)
			get_page(page);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		return true;
	}

	return false;
}

/*
 * Finish munlock after successful page isolation
 *
 * Page must be locked. This is a wrapper for try_to_munlock()
 * and putback_lru_page() with munlock accounting.
 */
static void __munlock_isolated_page(struct page *page)
{
	int ret = SWAP_AGAIN;

	/*
	 * Optimization: if the page was mapped just once, that's our mapping
	 * and we don't need to check all the other vmas.
	 */
	if (page_mapcount(page) > 1)
		ret = try_to_munlock(page);

	/* Did try_to_munlock() succeed or punt? */
	if (ret != SWAP_MLOCK)
		count_vm_event(UNEVICTABLE_PGMUNLOCKED);

	putback_lru_page(page);
}

/*
 * Accounting for page isolation fail during munlock
 *
 * Performs accounting when page isolation fails in munlock. There is nothing
 * else to do because it means some other task has already removed the page
 * from the LRU. putback_lru_page() will take care of removing the page from
 * the unevictable list, if necessary. vmscan [page_referenced()] will move
 * the page back to the unevictable list if some other vma has it mlocked.
 */
static void __munlock_isolation_failed(struct page *page)
{
	if (PageUnevictable(page))
		__count_vm_event(UNEVICTABLE_PGSTRANDED);
	else
		__count_vm_event(UNEVICTABLE_PGMUNLOCKED);
}

/**
 * munlock_vma_page - munlock a vma page
 * @page - page to be unlocked, either a normal page or THP page head
 *
 * returns the size of the page as a page mask (0 for normal page,
 *         HPAGE_PMD_NR - 1 for THP head page)
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
unsigned int munlock_vma_page(struct page *page)
{
	int nr_pages;
	struct zone *zone = page_zone(page);

	/* For try_to_munlock() and to serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);

	/*
	 * Serialize with any parallel __split_huge_page_refcount() which
	 * might otherwise copy PageMlocked to part of the tail pages before
	 * we clear it in the head page. It also stabilizes hpage_nr_pages().
	 */
	spin_lock_irq(zone_lru_lock(zone));

	nr_pages = hpage_nr_pages(page);
	if (!TestClearPageMlocked(page))
		goto unlock_out;

	__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);

	if (__munlock_isolate_lru_page(page, true)) {
		spin_unlock_irq(zone_lru_lock(zone));
		__munlock_isolated_page(page);
		goto out;
	}
	__munlock_isolation_failed(page);

unlock_out:
	spin_unlock_irq(zone_lru_lock(zone));

out:
	return nr_pages - 1;
}
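
/*
 * Worked example for the return value above: on x86-64 with 4K pages,
 * HPAGE_PMD_NR is 512, so a THP head returns 511 while a normal page
 * returns 0.  The caller in munlock_vma_pages_range() then computes
 * page_increm = 1 + page_mask and advances by the whole huge page.
 */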

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}
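
/*
 * Example of the mapping above: locking a range with no backing vma
 * makes get_user_pages() return -EFAULT, but POSIX specifies ENOMEM
 * for "some or all of the address range is not mapped"; an allocation
 * failure (-ENOMEM) is instead reported as -EAGAIN, "some or all of
 * the memory could not be locked when the call was made".
 */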

/*
 * Prepare page for fast batched LRU putback via putback_lru_evictable_pagevec()
 *
 * The fast path is available only for evictable pages with a single mapping.
 * Then we can bypass the per-cpu pvec and get better performance.
 * When mapcount > 1 we need try_to_munlock(), which can fail.
 * When !page_evictable(), we need the full redo logic of putback_lru_page()
 * to avoid leaving an evictable page on the unevictable list.
 *
 * In case of success, @page is added to @pvec and @pgrescued is incremented
 * in case that the page was previously unevictable. @page is also unlocked.
 */
static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
		int *pgrescued)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (page_mapcount(page) <= 1 && page_evictable(page)) {
		pagevec_add(pvec, page);
		if (TestClearPageUnevictable(page))
			(*pgrescued)++;
		unlock_page(page);
		return true;
	}

	return false;
}

/*
 * Putback multiple evictable pages to the LRU
 *
 * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
 * the pages might have meanwhile become unevictable but that is OK.
 */
static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
{
	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
	/*
	 * __pagevec_lru_add() calls release_pages() so we don't call
	 * put_page() explicitly
	 */
	__pagevec_lru_add(pvec);
	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
}

/*
 * Munlock a batch of pages from the same zone
 *
 * The work is split to two main phases. First phase clears the Mlocked flag
 * and attempts to isolate the pages, all under a single zone lru lock.
 * The second phase finishes the munlock only for pages where isolation
 * succeeded.
 *
 * Note that the pagevec may be modified during the process.
 */
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
	int i;
	int nr = pagevec_count(pvec);
	int delta_munlocked;
	struct pagevec pvec_putback;
	int pgrescued = 0;

	pagevec_init(&pvec_putback, 0);

	/* Phase 1: page isolation */
	spin_lock_irq(zone_lru_lock(zone));
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (TestClearPageMlocked(page)) {
			/*
			 * We already have pin from follow_page_mask()
			 * so we can spare the get_page() here.
			 */
			if (__munlock_isolate_lru_page(page, false))
				continue;
			else
				__munlock_isolation_failed(page);
		}

		/*
		 * We won't be munlocking this page in the next phase
		 * but we still need to release the follow_page_mask()
		 * pin. We cannot do it under lru_lock however. If it's
		 * the last pin, __page_cache_release() would deadlock.
		 */
		pagevec_add(&pvec_putback, pvec->pages[i]);
		pvec->pages[i] = NULL;
	}
	delta_munlocked = -nr + pagevec_count(&pvec_putback);
	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
	spin_unlock_irq(zone_lru_lock(zone));

	/* Now we can release pins of pages that we are not munlocking */
	pagevec_release(&pvec_putback);

	/* Phase 2: page munlock */
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (page) {
			lock_page(page);
			if (!__putback_lru_fast_prepare(page, &pvec_putback,
					&pgrescued)) {
				/*
				 * Slow path. We don't want to lose the last
				 * pin before unlock_page()
				 */
				get_page(page); /* for putback_lru_page() */
				__munlock_isolated_page(page);
				unlock_page(page);
				put_page(page); /* from follow_page_mask() */
			}
		}
	}

	/*
	 * Phase 3: page putback for pages that qualified for the fast path
	 * This will also call put_page() to return pin from follow_page_mask()
	 */
	if (pagevec_count(&pvec_putback))
		__putback_lru_fast(&pvec_putback, pgrescued);
}

/*
 * Fill up pagevec for __munlock_pagevec using pte walk
 *
 * The function expects that the struct page corresponding to @start address is
 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
 *
 * The rest of @pvec is filled by subsequent pages within the same pmd and same
 * zone, as long as the pte's are present and vm_normal_page() succeeds. These
 * pages also get pinned.
 *
 * Returns the address of the next page that should be scanned. This equals
 * @start + PAGE_SIZE when no page could be added by the pte walk.
 */
static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
		struct vm_area_struct *vma, int zoneid,	unsigned long start,
		unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;

	/*
	 * Initialize pte walk starting at the already pinned page where we
	 * are sure that there is a pte, as it was pinned under the same
	 * mmap_sem write op.
	 */
	pte = get_locked_pte(vma->vm_mm, start,	&ptl);
	/* Make sure we do not cross the page table boundary */
	end = pgd_addr_end(start, end);
	end = pud_addr_end(start, end);
	end = pmd_addr_end(start, end);

	/* The page next to the pinned page is the first we will try to get */
	start += PAGE_SIZE;
	while (start < end) {
		struct page *page = NULL;
		pte++;
		if (pte_present(*pte))
			page = vm_normal_page(vma, start, *pte);
		/*
		 * Break if page could not be obtained or the page's node+zone does not
		 * match
		 */
		if (!page || page_zone_id(page) != zoneid)
			break;

		/*
		 * Do not use pagevec for PTE-mapped THP,
		 * munlock_vma_pages_range() will handle them.
		 */
		if (PageTransCompound(page))
			break;

		get_page(page);
		/*
		 * Increase the address that will be returned *before* the
		 * eventual break due to pvec becoming full by adding the page
		 */
		start += PAGE_SIZE;
		if (pagevec_add(pvec, page) == 0)
			break;
	}
	pte_unmap_unlock(pte, ptl);
	return start;
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 *  For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

	while (start < end) {
		struct page *page;
		unsigned int page_mask;
		unsigned long page_increm;
		struct pagevec pvec;
		struct zone *zone;
		int zoneid;

		pagevec_init(&pvec, 0);
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
				&page_mask);

		if (page && !IS_ERR(page)) {
			if (PageTransTail(page)) {
				VM_BUG_ON_PAGE(PageMlocked(page), page);
				put_page(page); /* follow_page_mask() */
			} else if (PageTransHuge(page)) {
				lock_page(page);
				/*
				 * Any THP page found by follow_page_mask() may
				 * have gotten split before reaching
				 * munlock_vma_page(), so we need to recompute
				 * the page_mask here.
				 */
				page_mask = munlock_vma_page(page);
				unlock_page(page);
				put_page(page); /* follow_page_mask() */
			} else {
				/*
				 * Non-huge pages are handled in batches via
				 * pagevec. The pin from follow_page_mask()
				 * prevents them from being collapsed into a THP.
				 */
				pagevec_add(&pvec, page);
				zone = page_zone(page);
				zoneid = page_zone_id(page);

				/*
				 * Try to fill the rest of pagevec using fast
				 * pte walk. This will also update start to
				 * the next page to process. Then munlock the
				 * pagevec.
				 */
				start = __munlock_pagevec_fill(&pvec, vma,
						zoneid, start, end);
				__munlock_pagevec(&pvec, zone);
				goto next;
			}
		}
		page_increm = 1 + page_mask;
		start += page_increm * PAGE_SIZE;
next:
		cond_resched();
	}
}

/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, populate_vma_page_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}

static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;

		newflags |= flags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
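
/*
 * Example of the fixup above (hypothetical layout): mlocking the middle
 * of a single 16-page vma splits it into three vmas via split_vma(),
 * with only the middle one gaining VM_LOCKED; if the new flags match a
 * neighbour, vma_merge() in mlock_fixup() coalesces them instead.
 */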

/*
 * Go through the vma areas and sum up the size of the mlocked
 * vma pages.  Note that the deferred memory locking case
 * (mlock2() with MLOCK_ONFAULT) is also counted.
 * Return value: count of previously mlocked pages
 */
static int count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	int count = 0;

	if (mm == NULL)
		mm = current->mm;

	vma = find_vma(mm, start);
	if (vma == NULL)
		vma = mm->mmap;

	for (; vma ; vma = vma->vm_next) {
		if (start >= vma->vm_end)
			continue;
		if (start + len <=  vma->vm_start)
			break;
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (start + len < vma->vm_end) {
				count += start + len - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}
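
/*
 * Worked example for the accounting above (hypothetical numbers, 4K
 * pages): one VM_LOCKED vma spans [0x1000, 0x5000) and the request is
 * start = 0x2000, len = 0x2000.  The loop subtracts the 0x1000 bytes
 * below start, adds start + len - vm_start = 0x3000 and breaks, so
 * count ends up 0x2000 bytes, i.e. 2 pages already mlocked.
 */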

static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * It is possible that the regions requested intersect with
		 * previously mlocked areas; that part is already accounted
		 * in "mm->locked_vm" and must not be counted again in the
		 * new mlock increment.  So check and adjust if necessary.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	up_write(&current->mm->mmap_sem);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}

SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}
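
/*
 * Illustrative userspace sketch (not part of this file): plain
 * mlock2(addr, len, 0) behaves like mlock() and populates the whole
 * range up front, while
 *
 *	mlock2(addr, len, MLOCK_ONFAULT);
 *
 * only charges the accounting now and locks each page as it is
 * faulted in (VM_LOCKONFAULT suppresses the prefault in
 * populate_vma_page_range()).
 */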

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	up_write(&current->mm->mmap_sem);

	return ret;
}

/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * There are a couple of subtleties with this.  If mlockall() is called multiple
 * times with different flags, the values do not necessarily stack.  If mlockall
 * is called once including the MCL_FUTURE flag and then a second time without
 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
 */
static int apply_mlockall_flags(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
		newflags |= to_add;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
		cond_resched_rcu_qs();
	}
out:
	return 0;
}
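
/*
 * Example of the translation above: mlockall(MCL_CURRENT | MCL_FUTURE |
 * MCL_ONFAULT) sets VM_LOCKED | VM_LOCKONFAULT in mm->def_flags for
 * future mappings and adds the same bits to every existing vma; a later
 * mlockall(MCL_CURRENT) keeps existing vmas locked but clears def_flags,
 * so flags from successive calls do not stack.
 */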

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)))
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	if (flags & MCL_CURRENT)
		lru_add_drain_all();	/* flush pagevec */

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	up_write(&current->mm->mmap_sem);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}
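
/*
 * Illustrative (userspace, not part of this file): this accounting is
 * what backs shmctl(shmid, SHM_LOCK, NULL) and SHM_HUGETLB segments;
 * the locked size is charged to user->locked_shm of the locking user
 * rather than to any single mm's locked_vm.
 */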

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}