// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

#include "internal.h"

bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);
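
/*
 * Illustrative only, not part of the original file: a minimal userspace
 * sketch of the policy can_do_mlock() enforces. mlock() is permitted when
 * RLIMIT_MEMLOCK is non-zero (subject to the limit checks in do_mlock())
 * or when the caller has CAP_IPC_LOCK; otherwise it fails with EPERM.
 * "buf" and "len" are hypothetical.
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *	#include <sys/resource.h>
 *
 *	static int try_mlock(void *buf, size_t len)
 *	{
 *		struct rlimit rl;
 *
 *		if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0 && rl.rlim_cur == 0)
 *			fprintf(stderr, "mlock needs CAP_IPC_LOCK here\n");
 *		return mlock(buf, len);
 *	}
 */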

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked. So lazy mlock must take
 * the mmap_lock for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 *  LRU accounting for clear_page_mlock()
 */
void clear_page_mlock(struct page *page)
{
	int nr_pages;

	if (!TestClearPageMlocked(page))
		return;

	nr_pages = thp_nr_pages(page);
	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
	count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
	/*
	 * The previous TestClearPageMlocked() corresponds to the smp_mb()
	 * in __pagevec_lru_add_fn().
	 *
	 * See __pagevec_lru_add_fn for more explanation.
	 */
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race. The page has already moved to the
		 * evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	/* Serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	if (!TestSetPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);

		mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/*
 * Finish munlock after successful page isolation
 *
 * Page must be locked. This is a wrapper for try_to_munlock()
 * and putback_lru_page() with munlock accounting.
 */
static void __munlock_isolated_page(struct page *page)
{
	/*
	 * Optimization: if the page was mapped just once, that's our mapping
	 * and we don't need to check all the other vmas.
	 */
	if (page_mapcount(page) > 1)
		try_to_munlock(page);

	/* Did try_to_munlock() succeed or punt? */
	if (!PageMlocked(page))
		count_vm_events(UNEVICTABLE_PGMUNLOCKED, thp_nr_pages(page));

	putback_lru_page(page);
}

/*
 * Accounting for page isolation fail during munlock
 *
 * Performs accounting when page isolation fails in munlock. There is nothing
 * else to do because it means some other task has already removed the page
 * from the LRU. putback_lru_page() will take care of removing the page from
 * the unevictable list, if necessary. vmscan [page_referenced()] will move
 * the page back to the unevictable list if some other vma has it mlocked.
 */
static void __munlock_isolation_failed(struct page *page)
{
	int nr_pages = thp_nr_pages(page);

	if (PageUnevictable(page))
		__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	else
		__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
}

/**
 * munlock_vma_page - munlock a vma page
 * @page: page to be unlocked, either a normal page or THP page head
 *
 * returns the size of the page as a page mask (0 for normal page,
 *         HPAGE_PMD_NR - 1 for THP head page)
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
unsigned int munlock_vma_page(struct page *page)
{
	int nr_pages;

	/* For try_to_munlock() and to serialize with page migration */
	BUG_ON(!PageLocked(page));
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (!TestClearPageMlocked(page)) {
		/* Potentially, PTE-mapped THP: do not skip the rest of the PTEs */
		return 0;
	}

	nr_pages = thp_nr_pages(page);
	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);

	if (!isolate_lru_page(page))
		__munlock_isolated_page(page);
	else
		__munlock_isolation_failed(page);

	return nr_pages - 1;
}

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

/*
 * Prepare page for fast batched LRU putback via putback_lru_evictable_pagevec()
 *
 * The fast path is available only for evictable pages with single mapping.
 * Then we can bypass the per-cpu pvec and get better performance.
 * When mapcount > 1 we need try_to_munlock() which can fail.
 * When !page_evictable(), we need the full redo logic of putback_lru_page to
 * avoid leaving evictable page in unevictable list.
 *
 * In case of success, @page is added to @pvec and @pgrescued is incremented
 * in case that the page was previously unevictable. @page is also unlocked.
 */
static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
		int *pgrescued)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (page_mapcount(page) <= 1 && page_evictable(page)) {
		pagevec_add(pvec, page);
		if (TestClearPageUnevictable(page))
			(*pgrescued)++;
		unlock_page(page);
		return true;
	}

	return false;
}

/*
 * Putback multiple evictable pages to the LRU
 *
 * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
 * the pages might have meanwhile become unevictable but that is OK.
 */
static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
{
	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
	/*
	 * __pagevec_lru_add() calls release_pages() so we don't call
	 * put_page() explicitly
	 */
	__pagevec_lru_add(pvec);
	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
}

/*
 * Munlock a batch of pages from the same zone
 *
 * The work is split into two main phases. The first phase clears the Mlocked
 * flag and attempts to isolate the pages, all under a single zone lru lock.
 * The second phase finishes the munlock only for pages where isolation
 * succeeded.
 *
 * Note that the pagevec may be modified during the process.
 */
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
	int i;
	int nr = pagevec_count(pvec);
	int delta_munlocked = -nr;
	struct pagevec pvec_putback;
	int pgrescued = 0;

	pagevec_init(&pvec_putback);

	/* Phase 1: page isolation */
	spin_lock_irq(&zone->zone_pgdat->lru_lock);
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (TestClearPageMlocked(page)) {
			/*
			 * We already have pin from follow_page_mask()
			 * so we can spare the get_page() here.
			 */
			if (PageLRU(page)) {
				struct lruvec *lruvec;

				ClearPageLRU(page);
				lruvec = mem_cgroup_page_lruvec(page,
							page_pgdat(page));
				del_page_from_lru_list(page, lruvec,
							page_lru(page));
				continue;
			} else
				__munlock_isolation_failed(page);
		} else {
			delta_munlocked++;
		}

		/*
		 * We won't be munlocking this page in the next phase
		 * but we still need to release the follow_page_mask()
		 * pin. We cannot do it under lru_lock however. If it's
		 * the last pin, __page_cache_release() would deadlock.
		 */
		pagevec_add(&pvec_putback, pvec->pages[i]);
		pvec->pages[i] = NULL;
	}
	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
	spin_unlock_irq(&zone->zone_pgdat->lru_lock);

	/* Now we can release pins of pages that we are not munlocking */
	pagevec_release(&pvec_putback);

	/* Phase 2: page munlock */
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (page) {
			lock_page(page);
			if (!__putback_lru_fast_prepare(page, &pvec_putback,
					&pgrescued)) {
				/*
				 * Slow path. We don't want to lose the last
				 * pin before unlock_page()
				 */
				get_page(page); /* for putback_lru_page() */
				__munlock_isolated_page(page);
				unlock_page(page);
				put_page(page); /* from follow_page_mask() */
			}
		}
	}

	/*
	 * Phase 3: page putback for pages that qualified for the fast path
	 * This will also call put_page() to return pin from follow_page_mask()
	 */
	if (pagevec_count(&pvec_putback))
		__putback_lru_fast(&pvec_putback, pgrescued);
}

/*
 * Fill up pagevec for __munlock_pagevec using pte walk
 *
 * The function expects that the struct page corresponding to @start address is
 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
 *
 * The rest of @pvec is filled by subsequent pages within the same pmd and same
 * zone, as long as the PTEs are present and vm_normal_page() succeeds. These
 * pages also get pinned.
 *
 * Returns the address of the next page that should be scanned. This equals
 * @start + PAGE_SIZE when no page could be added by the pte walk.
 */
static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
			struct vm_area_struct *vma, struct zone *zone,
			unsigned long start, unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;

	/*
	 * Initialize pte walk starting at the already pinned page where we
	 * are sure that there is a pte, as it was pinned under the same
	 * mmap_lock write op.
	 */
	pte = get_locked_pte(vma->vm_mm, start, &ptl);
	/* Make sure we do not cross the page table boundary */
	end = pgd_addr_end(start, end);
	end = p4d_addr_end(start, end);
	end = pud_addr_end(start, end);
	end = pmd_addr_end(start, end);

	/* The page next to the pinned page is the first we will try to get */
	start += PAGE_SIZE;
	while (start < end) {
		struct page *page = NULL;
		pte++;
		if (pte_present(*pte))
			page = vm_normal_page(vma, start, *pte);
		/*
		 * Break if page could not be obtained or the page's
		 * node+zone does not match
		 */
		if (!page || page_zone(page) != zone)
			break;

		/*
		 * Do not use pagevec for PTE-mapped THP,
		 * munlock_vma_pages_range() will handle them.
		 */
		if (PageTransCompound(page))
			break;

		get_page(page);
		/*
		 * Increase the address that will be returned *before* the
		 * eventual break due to pvec becoming full by adding the page
		 */
		start += PAGE_SIZE;
		if (pagevec_add(pvec, page) == 0)
			break;
	}
	pte_unmap_unlock(pte, ptl);
	return start;
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 *  For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

	while (start < end) {
		struct page *page;
		unsigned int page_mask = 0;
		unsigned long page_increm;
		struct pagevec pvec;
		struct zone *zone;

		pagevec_init(&pvec);
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);

		if (page && !IS_ERR(page)) {
			if (PageTransTail(page)) {
				VM_BUG_ON_PAGE(PageMlocked(page), page);
				put_page(page); /* follow_page_mask() */
			} else if (PageTransHuge(page)) {
				lock_page(page);
				/*
				 * Any THP page found by follow_page_mask() may
				 * have gotten split before reaching
				 * munlock_vma_page(), so we need to compute
				 * the page_mask here instead.
				 */
				page_mask = munlock_vma_page(page);
				unlock_page(page);
				put_page(page); /* follow_page_mask() */
			} else {
				/*
				 * Non-huge pages are handled in batches via
				 * pagevec. The pin from follow_page_mask()
				 * prevents them from being collapsed into
				 * a THP.
				 */
				pagevec_add(&pvec, page);
				zone = page_zone(page);

				/*
				 * Try to fill the rest of pagevec using fast
				 * pte walk. This will also update start to
				 * the next page to process. Then munlock the
				 * pagevec.
				 */
				start = __munlock_pagevec_fill(&pvec, vma,
						zone, start, end);
				__munlock_pagevec(&pvec, zone);
				goto next;
			}
		}
		page_increm = 1 + page_mask;
		start += page_increm * PAGE_SIZE;
next:
		cond_resched();
	}
}

/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);
	vm_flags_t old_flags = vma->vm_flags;

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
	    vma_is_dax(vma))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	else if (old_flags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, populate_vma_page_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}

static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	int error;

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;

		newflags |= flags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

/*
 * Go through the vma areas and sum the size of the mlocked pages.
 * Note that the deferred memory-locking case (mlock2() with
 * MLOCK_ONFAULT) is also counted.
 * Return value: count of previously mlocked pages
 */
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long count = 0;

	if (mm == NULL)
		mm = current->mm;

	vma = find_vma(mm, start);
	if (vma == NULL)
		vma = mm->mmap;

	for (; vma ; vma = vma->vm_next) {
		if (start >= vma->vm_end)
			continue;
		if (start + len <= vma->vm_start)
			break;
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (start + len < vma->vm_end) {
				count += start + len - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}
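
/*
 * Worked example for count_mm_mlocked_page_nr() (illustrative, assuming
 * PAGE_SIZE == 4096): one VM_LOCKED vma spans [0x1000, 0x5000) and the
 * request is start = 0x2000, len = 0x2000. Because start > vm_start,
 * count -= 0x1000; because start + len (0x4000) < vm_end, count +=
 * 0x4000 - 0x1000 and the loop breaks. count ends up 0x2000, so 2 pages
 * of the request were already mlocked.
 */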

static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	start = untagged_addr(start);

	if (!can_do_mlock())
		return -EPERM;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * It is possible that the regions requested intersect with
		 * previously mlocked areas, and the part already accounted
		 * in "mm->locked_vm" should not be counted again toward the
		 * new mlock increment. So check and adjust the locked count
		 * if necessary.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	mmap_write_unlock(current->mm);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}

SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}
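
/*
 * Illustrative only, not part of the original file: mlock2() as seen from
 * userspace. With MLOCK_ONFAULT the vma gets VM_LOCKONFAULT and pages are
 * locked as they are faulted in, instead of being populated up front as
 * plain mlock() does. Glibc has provided a wrapper since 2.27; older
 * systems can go through syscall(2).
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	char *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (buf != MAP_FAILED && mlock2(buf, 1 << 20, MLOCK_ONFAULT))
 *		perror("mlock2");
 */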

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	start = untagged_addr(start);

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	mmap_write_unlock(current->mm);

	return ret;
}

/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * There are a couple of subtleties with this.  If mlockall() is called multiple
 * times with different flags, the values do not necessarily stack.  If mlockall
 * is called once including the MCL_FUTURE flag and then a second time without
 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
 */
static int apply_mlockall_flags(int flags)
{
	struct vm_area_struct *vma, *prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
		newflags |= to_add;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
		cond_resched();
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
	    flags == MCL_ONFAULT)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	mmap_write_unlock(current->mm);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}
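
/*
 * Illustrative only, not part of the original file: the non-stacking flag
 * behaviour documented above apply_mlockall_flags(), from a userspace
 * angle. The second call below drops MCL_FUTURE again, because each
 * mlockall() replaces the previous flags rather than adding to them.
 *
 *	#include <sys/mman.h>
 *
 *	mlockall(MCL_CURRENT | MCL_FUTURE);	// lock now and future mappings
 *	mlockall(MCL_CURRENT | MCL_ONFAULT);	// MCL_FUTURE is now cleared
 */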

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	mmap_write_unlock(current->mm);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}
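
/*
 * Illustrative only, not part of the original file: user_shm_lock() and
 * user_shm_unlock() back shmctl()'s SHM_LOCK/SHM_UNLOCK, whose accounting
 * is charged to the segment owner's user_struct rather than to a process.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);	// charged to user->locked_shm
 *	shmctl(id, SHM_UNLOCK, NULL);	// uncharged again
 */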