/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

#include "internal.h"

bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked. So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 *  LRU accounting for clear_page_mlock()
 */
void clear_page_mlock(struct page *page)
{
	if (!TestClearPageMlocked(page))
		return;

	mod_zone_page_state(page_zone(page), NR_MLOCK,
			    -hpage_nr_pages(page));
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race. The page already moved to the evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	/* Serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	if (!TestSetPageMlocked(page)) {
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/*
 * Isolate a page from LRU with optional get_page() pin.
 * Assumes lru_lock already held and page already pinned.
 */
static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
{
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
		if (getpage)
			get_page(page);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		return true;
	}

	return false;
}

/*
 * Finish munlock after successful page isolation
 *
 * Page must be locked. This is a wrapper for try_to_munlock()
 * and putback_lru_page() with munlock accounting.
 */
static void __munlock_isolated_page(struct page *page)
{
	int ret = SWAP_AGAIN;

	/*
	 * Optimization: if the page was mapped just once, that's our mapping
	 * and we don't need to check all the other vmas.
	 */
	if (page_mapcount(page) > 1)
		ret = try_to_munlock(page);

	/* Did try_to_munlock() succeed or punt? */
	if (ret != SWAP_MLOCK)
		count_vm_event(UNEVICTABLE_PGMUNLOCKED);

	putback_lru_page(page);
}

/*
 * Accounting for page isolation fail during munlock
 *
 * Performs accounting when page isolation fails in munlock. There is nothing
 * else to do because it means some other task has already removed the page
 * from the LRU. putback_lru_page() will take care of removing the page from
 * the unevictable list, if necessary. vmscan [page_referenced()] will move
 * the page back to the unevictable list if some other vma has it mlocked.
 */
static void __munlock_isolation_failed(struct page *page)
{
	if (PageUnevictable(page))
		__count_vm_event(UNEVICTABLE_PGSTRANDED);
	else
		__count_vm_event(UNEVICTABLE_PGMUNLOCKED);
}

/**
 * munlock_vma_page - munlock a vma page
 * @page - page to be unlocked, either a normal page or THP page head
 *
 * returns the size of the page as a page mask (0 for normal page,
 *         HPAGE_PMD_NR - 1 for THP head page)
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
unsigned int munlock_vma_page(struct page *page)
{
	int nr_pages;
	struct zone *zone = page_zone(page);

	/* For try_to_munlock() and to serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);

	/*
	 * Serialize with any parallel __split_huge_page_refcount() which
	 * might otherwise copy PageMlocked to part of the tail pages before
	 * we clear it in the head page. It also stabilizes hpage_nr_pages().
	 */
	spin_lock_irq(zone_lru_lock(zone));

	if (!TestClearPageMlocked(page)) {
		/* Potentially, PTE-mapped THP: do not skip the rest PTEs */
		nr_pages = 1;
		goto unlock_out;
	}

	nr_pages = hpage_nr_pages(page);
	__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);

	if (__munlock_isolate_lru_page(page, true)) {
		spin_unlock_irq(zone_lru_lock(zone));
		__munlock_isolated_page(page);
		goto out;
	}
	__munlock_isolation_failed(page);

unlock_out:
	spin_unlock_irq(zone_lru_lock(zone));

out:
	return nr_pages - 1;
}

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

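/*
 * Worked example (illustrative, not from the original source): mlock()
 * on a range containing an unmapped hole has __mm_populate() pass the
 * -EFAULT from get_user_pages() up to do_mlock(), which this helper
 * turns into -ENOMEM, the error POSIX specifies for partially unmapped
 * ranges; a genuine allocation failure (-ENOMEM) becomes -EAGAIN,
 * i.e. "try again later".
 */
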
/*
 * Prepare page for fast batched LRU putback via __putback_lru_fast()
 *
 * The fast path is available only for evictable pages with single mapping.
 * Then we can bypass the per-cpu pvec and get better performance.
 * When mapcount > 1 we need try_to_munlock() which can fail.
 * When !page_evictable(), we need the full redo logic of putback_lru_page to
 * avoid leaving an evictable page in the unevictable list.
 *
 * In case of success, @page is added to @pvec and @pgrescued is incremented
 * in case that the page was previously unevictable. @page is also unlocked.
 */
static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
		int *pgrescued)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (page_mapcount(page) <= 1 && page_evictable(page)) {
		pagevec_add(pvec, page);
		if (TestClearPageUnevictable(page))
			(*pgrescued)++;
		unlock_page(page);
		return true;
	}

	return false;
}

/*
 * Putback multiple evictable pages to the LRU
 *
 * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
 * the pages might have meanwhile become unevictable but that is OK.
 */
static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
{
	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
	/*
	 * __pagevec_lru_add() calls release_pages() so we don't call
	 * put_page() explicitly
	 */
	__pagevec_lru_add(pvec);
	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
}

/*
 * Munlock a batch of pages from the same zone
 *
 * The work is split to two main phases. First phase clears the Mlocked flag
 * and attempts to isolate the pages, all under a single zone lru lock.
 * The second phase finishes the munlock only for pages where isolation
 * succeeded.
 *
 * Note that the pagevec may be modified during the process.
 */
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
	int i;
	int nr = pagevec_count(pvec);
	int delta_munlocked;
	struct pagevec pvec_putback;
	int pgrescued = 0;

	pagevec_init(&pvec_putback, 0);

	/* Phase 1: page isolation */
	spin_lock_irq(zone_lru_lock(zone));
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (TestClearPageMlocked(page)) {
			/*
			 * We already have pin from follow_page_mask()
			 * so we can spare the get_page() here.
			 */
			if (__munlock_isolate_lru_page(page, false))
				continue;
			else
				__munlock_isolation_failed(page);
		}

		/*
		 * We won't be munlocking this page in the next phase
		 * but we still need to release the follow_page_mask()
		 * pin. We cannot do it under lru_lock however. If it's
		 * the last pin, __page_cache_release() would deadlock.
		 */
		pagevec_add(&pvec_putback, pvec->pages[i]);
		pvec->pages[i] = NULL;
	}
	delta_munlocked = -nr + pagevec_count(&pvec_putback);
	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
	spin_unlock_irq(zone_lru_lock(zone));

	/* Now we can release pins of pages that we are not munlocking */
	pagevec_release(&pvec_putback);

	/* Phase 2: page munlock */
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (page) {
			lock_page(page);
			if (!__putback_lru_fast_prepare(page, &pvec_putback,
					&pgrescued)) {
				/*
				 * Slow path. We don't want to lose the last
				 * pin before unlock_page()
				 */
				get_page(page); /* for putback_lru_page() */
				__munlock_isolated_page(page);
				unlock_page(page);
				put_page(page); /* from follow_page_mask() */
			}
		}
	}

	/*
	 * Phase 3: page putback for pages that qualified for the fast path
	 * This will also call put_page() to return pin from follow_page_mask()
	 */
	if (pagevec_count(&pvec_putback))
		__putback_lru_fast(&pvec_putback, pgrescued);
}

/*
 * Fill up pagevec for __munlock_pagevec using pte walk
 *
 * The function expects that the struct page corresponding to @start address is
 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
 *
 * The rest of @pvec is filled by subsequent pages within the same pmd and same
 * zone, as long as the pte's are present and vm_normal_page() succeeds. These
 * pages also get pinned.
 *
 * Returns the address of the next page that should be scanned. This equals
 * @start + PAGE_SIZE when no page could be added by the pte walk.
 */
static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
		struct vm_area_struct *vma, int zoneid,	unsigned long start,
		unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;

	/*
	 * Initialize pte walk starting at the already pinned page where we
	 * are sure that there is a pte, as it was pinned under the same
	 * mmap_sem write op.
	 */
	pte = get_locked_pte(vma->vm_mm, start, &ptl);
	/* Make sure we do not cross the page table boundary */
	end = pgd_addr_end(start, end);
	end = pud_addr_end(start, end);
	end = pmd_addr_end(start, end);

	/* The page next to the pinned page is the first we will try to get */
	start += PAGE_SIZE;
	while (start < end) {
		struct page *page = NULL;
		pte++;
		if (pte_present(*pte))
			page = vm_normal_page(vma, start, *pte);
		/*
		 * Break if page could not be obtained or the page's node+zone does not
		 * match
		 */
		if (!page || page_zone_id(page) != zoneid)
			break;

		/*
		 * Do not use pagevec for PTE-mapped THP,
		 * munlock_vma_pages_range() will handle them.
		 */
		if (PageTransCompound(page))
			break;

		get_page(page);
		/*
		 * Increase the address that will be returned *before* the
		 * eventual break due to pvec becoming full by adding the page
		 */
		start += PAGE_SIZE;
		if (pagevec_add(pvec, page) == 0)
			break;
	}
	pte_unmap_unlock(pte, ptl);
	return start;
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 *  For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

	while (start < end) {
		struct page *page;
		unsigned int page_mask;
		unsigned long page_increm;
		struct pagevec pvec;
		struct zone *zone;
		int zoneid;

		pagevec_init(&pvec, 0);
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
				&page_mask);

		if (page && !IS_ERR(page)) {
			if (PageTransTail(page)) {
				VM_BUG_ON_PAGE(PageMlocked(page), page);
				put_page(page); /* follow_page_mask() */
			} else if (PageTransHuge(page)) {
				lock_page(page);
				/*
				 * Any THP page found by follow_page_mask() may
				 * have gotten split before reaching
				 * munlock_vma_page(), so we need to recompute
				 * the page_mask here.
				 */
				page_mask = munlock_vma_page(page);
				unlock_page(page);
				put_page(page); /* follow_page_mask() */
			} else {
				/*
				 * Non-huge pages are handled in batches via
				 * pagevec. The pin from follow_page_mask()
				 * prevents them from collapsing by THP.
				 */
				pagevec_add(&pvec, page);
				zone = page_zone(page);
				zoneid = page_zone_id(page);

				/*
				 * Try to fill the rest of pagevec using fast
				 * pte walk. This will also update start to
				 * the next page to process. Then munlock the
				 * pagevec.
				 */
				start = __munlock_pagevec_fill(&pvec, vma,
						zoneid, start, end);
				__munlock_pagevec(&pvec, zone);
				goto next;
			}
		}
		page_increm = 1 + page_mask;
		start += page_increm * PAGE_SIZE;
next:
		cond_resched();
	}
}

/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);
	vm_flags_t old_flags = vma->vm_flags;

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	else if (old_flags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, populate_vma_page_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}

static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;

		newflags |= flags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

/*
 * Go through vma areas and sum size of mlocked
 * vma pages, as return value.
 * Note that the deferred memory locking case (mlock2(,,MLOCK_ONFAULT))
 * is also counted.
 * Return value: previously mlocked page counts
 */
static int count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	int count = 0;

	if (mm == NULL)
		mm = current->mm;

	vma = find_vma(mm, start);
	if (vma == NULL)
		vma = mm->mmap;

	for (; vma ; vma = vma->vm_next) {
		if (start >= vma->vm_end)
			continue;
		if (start + len <= vma->vm_start)
			break;
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (start + len < vma->vm_end) {
				count += start + len - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}

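/*
 * Worked example of the accounting above (hypothetical numbers): with a
 * VM_LOCKED vma spanning [0x1000, 0x5000) and a request for
 * start = 0x2000, len = 0x2000, the loop subtracts the 0x1000 bytes of
 * the vma below start and, since start + len < vm_end, adds
 * start + len - vm_start = 0x3000 and breaks: a net count of 0x2000
 * bytes, i.e. two 4 KiB pages already locked in the requested range.
 */
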
static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * It is possible that the regions requested intersect with
		 * previously mlocked areas; that part is already accounted
		 * for in "mm->locked_vm" and should not be counted toward
		 * the new mlock increment. So check and adjust the locked
		 * count if necessary.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	up_write(&current->mm->mmap_sem);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}

SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}

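/*
 * Illustrative user-space sketch (an assumption for illustration, not
 * part of this file): lock a buffer lazily with MLOCK_ONFAULT so pages
 * are pinned only once they are first touched. Assumes a libc wrapping
 * the mlock2() syscall (glibc >= 2.27; _GNU_SOURCE for the flag).
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 1 << 20;
 *		void *buf = malloc(len);
 *
 *		if (!buf)
 *			return 1;
 *		if (mlock2(buf, len, MLOCK_ONFAULT)) {
 *			perror("mlock2");	// e.g. EPERM, ENOMEM or EAGAIN
 *			return 1;
 *		}
 *		// ... use buf; touched pages now stay resident ...
 *		munlock(buf, len);
 *		free(buf);
 *		return 0;
 *	}
 */
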
SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	up_write(&current->mm->mmap_sem);

	return ret;
}

/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * There are a couple of subtleties with this.  If mlockall() is called multiple
 * times with different flags, the values do not necessarily stack.  If mlockall
 * is called once including the MCL_FUTURE flag and then a second time without
 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
 */
static int apply_mlockall_flags(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
		newflags |= to_add;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
		cond_resched_rcu_qs();
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)))
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	if (flags & MCL_CURRENT)
		lru_add_drain_all();	/* flush pagevec */

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	up_write(&current->mm->mmap_sem);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}

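/*
 * Illustrative user-space sketch (an assumption for illustration, not
 * part of this file): a latency-sensitive process pinning current and
 * future mappings. Note the non-stacking semantics implemented by
 * apply_mlockall_flags() above: a later mlockall(MCL_CURRENT) without
 * MCL_FUTURE clears VM_LOCKED from mm->def_flags again.
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
 *			perror("mlockall");	// often EPERM or ENOMEM
 *			return 1;
 *		}
 *		// ... latency-critical work ...
 *		munlockall();
 *		return 0;
 *	}
 */
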
SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}