/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

#include "internal.h"

bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked. So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 *  LRU accounting for clear_page_mlock()
 */
void clear_page_mlock(struct page *page)
{
	if (!TestClearPageMlocked(page))
		return;

	mod_zone_page_state(page_zone(page), NR_MLOCK,
			    -hpage_nr_pages(page));
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race. The page has already moved to the evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	/* Serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	if (!TestSetPageMlocked(page)) {
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/*
 * Isolate a page from LRU with optional get_page() pin.
 * Assumes lru_lock already held and page already pinned.
 */
static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
{
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
		if (getpage)
			get_page(page);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		return true;
	}

	return false;
}

/*
 * Finish munlock after successful page isolation
 *
 * Page must be locked. This is a wrapper for try_to_munlock()
 * and putback_lru_page() with munlock accounting.
 */
static void __munlock_isolated_page(struct page *page)
{
	/*
	 * Optimization: if the page was mapped just once, that's our mapping
	 * and we don't need to check all the other vmas.
	 */
	if (page_mapcount(page) > 1)
		try_to_munlock(page);

	/* Did try_to_munlock() succeed or punt? */
	if (!PageMlocked(page))
		count_vm_event(UNEVICTABLE_PGMUNLOCKED);

	putback_lru_page(page);
}

/*
 * Accounting for page isolation fail during munlock
 *
 * Performs accounting when page isolation fails in munlock. There is nothing
 * else to do because it means some other task has already removed the page
 * from the LRU. putback_lru_page() will take care of removing the page from
 * the unevictable list, if necessary. vmscan [page_referenced()] will move
 * the page back to the unevictable list if some other vma has it mlocked.
 */
static void __munlock_isolation_failed(struct page *page)
{
	if (PageUnevictable(page))
		__count_vm_event(UNEVICTABLE_PGSTRANDED);
	else
		__count_vm_event(UNEVICTABLE_PGMUNLOCKED);
}

/**
 * munlock_vma_page - munlock a vma page
 * @page - page to be unlocked, either a normal page or THP page head
 *
 * returns the size of the page as a page mask (0 for normal page,
 *         HPAGE_PMD_NR - 1 for THP head page)
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
unsigned int munlock_vma_page(struct page *page)
{
	int nr_pages;
	struct zone *zone = page_zone(page);

	/* For try_to_munlock() and to serialize with page migration */
	BUG_ON(!PageLocked(page));

	VM_BUG_ON_PAGE(PageTail(page), page);

	/*
	 * Serialize with any parallel __split_huge_page_refcount() which
	 * might otherwise copy PageMlocked to part of the tail pages before
	 * we clear it in the head page. It also stabilizes hpage_nr_pages().
	 */
	spin_lock_irq(zone_lru_lock(zone));

	if (!TestClearPageMlocked(page)) {
		/* Potentially, PTE-mapped THP: do not skip the rest PTEs */
		nr_pages = 1;
		goto unlock_out;
	}

	nr_pages = hpage_nr_pages(page);
	__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);

	if (__munlock_isolate_lru_page(page, true)) {
		spin_unlock_irq(zone_lru_lock(zone));
		__munlock_isolated_page(page);
		goto out;
	}
	__munlock_isolation_failed(page);

unlock_out:
	spin_unlock_irq(zone_lru_lock(zone));

out:
	return nr_pages - 1;
}

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

/*
 * Prepare page for fast batched LRU putback via __putback_lru_fast()
 *
 * The fast path is available only for evictable pages with a single mapping.
 * Then we can bypass the per-cpu pvec and get better performance.
 * When mapcount > 1 we need try_to_munlock() which can fail.
 * When !page_evictable(), we need the full redo logic of putback_lru_page to
 * avoid leaving an evictable page on the unevictable list.
 *
 * In case of success, @page is added to @pvec and @pgrescued is incremented
 * in case that the page was previously unevictable. @page is also unlocked.
 */
static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
		int *pgrescued)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (page_mapcount(page) <= 1 && page_evictable(page)) {
		pagevec_add(pvec, page);
		if (TestClearPageUnevictable(page))
			(*pgrescued)++;
		unlock_page(page);
		return true;
	}

	return false;
}

/*
 * Putback multiple evictable pages to the LRU
 *
 * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
 * the pages might have meanwhile become unevictable but that is OK.
 */
static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
{
	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
	/*
	 * __pagevec_lru_add() calls release_pages() so we don't call
	 * put_page() explicitly
	 */
	__pagevec_lru_add(pvec);
	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
}

/*
 * Munlock a batch of pages from the same zone
 *
 * The work is split to two main phases. First phase clears the Mlocked flag
 * and attempts to isolate the pages, all under a single zone lru lock.
 * The second phase finishes the munlock only for pages where isolation
 * succeeded.
 *
 * Note that the pagevec may be modified during the process.
 */
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
	int i;
	int nr = pagevec_count(pvec);
	int delta_munlocked = -nr;
	struct pagevec pvec_putback;
	int pgrescued = 0;

	pagevec_init(&pvec_putback, 0);

	/* Phase 1: page isolation */
	spin_lock_irq(zone_lru_lock(zone));
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (TestClearPageMlocked(page)) {
			/*
			 * We already have pin from follow_page_mask()
			 * so we can spare the get_page() here.
			 */
			if (__munlock_isolate_lru_page(page, false))
				continue;
			else
				__munlock_isolation_failed(page);
		} else {
			delta_munlocked++;
		}

		/*
		 * We won't be munlocking this page in the next phase
		 * but we still need to release the follow_page_mask()
		 * pin. We cannot do it under lru_lock however. If it's
		 * the last pin, __page_cache_release() would deadlock.
		 */
		pagevec_add(&pvec_putback, pvec->pages[i]);
		pvec->pages[i] = NULL;
	}
	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
	spin_unlock_irq(zone_lru_lock(zone));

	/* Now we can release pins of pages that we are not munlocking */
	pagevec_release(&pvec_putback);

	/* Phase 2: page munlock */
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (page) {
			lock_page(page);
			if (!__putback_lru_fast_prepare(page, &pvec_putback,
					&pgrescued)) {
				/*
				 * Slow path. We don't want to lose the last
				 * pin before unlock_page()
				 */
				get_page(page); /* for putback_lru_page() */
				__munlock_isolated_page(page);
				unlock_page(page);
				put_page(page); /* from follow_page_mask() */
			}
		}
	}

	/*
	 * Phase 3: page putback for pages that qualified for the fast path
	 * This will also call put_page() to return pin from follow_page_mask()
	 */
	if (pagevec_count(&pvec_putback))
		__putback_lru_fast(&pvec_putback, pgrescued);
}

/*
 * Fill up pagevec for __munlock_pagevec using pte walk
 *
 * The function expects that the struct page corresponding to @start address is
 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
 *
 * The rest of @pvec is filled by subsequent pages within the same pmd and same
 * zone, as long as the pte's are present and vm_normal_page() succeeds. These
 * pages also get pinned.
 *
 * Returns the address of the next page that should be scanned. This equals
 * @start + PAGE_SIZE when no page could be added by the pte walk.
 */
static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
		struct vm_area_struct *vma, int zoneid,	unsigned long start,
		unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;

	/*
	 * Initialize pte walk starting at the already pinned page where we
	 * are sure that there is a pte, as it was pinned under the same
	 * mmap_sem write op.
	 */
	pte = get_locked_pte(vma->vm_mm, start,	&ptl);
	/* Make sure we do not cross the page table boundary */
	end = pgd_addr_end(start, end);
	end = p4d_addr_end(start, end);
	end = pud_addr_end(start, end);
	end = pmd_addr_end(start, end);

	/* The page next to the pinned page is the first we will try to get */
	start += PAGE_SIZE;
	while (start < end) {
		struct page *page = NULL;
		pte++;
		if (pte_present(*pte))
			page = vm_normal_page(vma, start, *pte);
		/*
		 * Break if page could not be obtained or the page's node+zone does not
		 * match
		 */
		if (!page || page_zone_id(page) != zoneid)
			break;

		/*
		 * Do not use pagevec for PTE-mapped THP,
		 * munlock_vma_pages_range() will handle them.
		 */
		if (PageTransCompound(page))
			break;

		get_page(page);
		/*
		 * Increase the address that will be returned *before* the
		 * eventual break due to pvec becoming full by adding the page
		 */
		start += PAGE_SIZE;
		if (pagevec_add(pvec, page) == 0)
			break;
	}
	pte_unmap_unlock(pte, ptl);
	return start;
}
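
/*
 * A worked example of the boundary clamping above (illustrative only; the
 * numbers assume x86-64 with 4K pages, where one pmd maps 2MB):
 *
 *	start = 0x7f00001ff000, end = 0x7f0000400000
 *
 * pmd_addr_end() clamps end to 0x7f0000200000, so the pte walk never leaves
 * the single page table that get_locked_pte() mapped and locked. The caller
 * then re-enters with the returned start = 0x7f0000200000.
 */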

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 *  For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

	while (start < end) {
		struct page *page;
		unsigned int page_mask = 0;
		unsigned long page_increm;
		struct pagevec pvec;
		struct zone *zone;
		int zoneid;

		pagevec_init(&pvec, 0);
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);

		if (page && !IS_ERR(page)) {
			if (PageTransTail(page)) {
				VM_BUG_ON_PAGE(PageMlocked(page), page);
				put_page(page); /* follow_page_mask() */
			} else if (PageTransHuge(page)) {
				lock_page(page);
				/*
				 * Any THP page found by follow_page_mask() may
				 * have gotten split before reaching
				 * munlock_vma_page(), so we need to compute
				 * the page_mask here instead.
				 */
				page_mask = munlock_vma_page(page);
				unlock_page(page);
				put_page(page); /* follow_page_mask() */
			} else {
				/*
				 * Non-huge pages are handled in batches via
				 * pagevec. The pin from follow_page_mask()
				 * prevents them from collapsing by THP.
				 */
				pagevec_add(&pvec, page);
				zone = page_zone(page);
				zoneid = page_zone_id(page);

				/*
				 * Try to fill the rest of pagevec using fast
				 * pte walk. This will also update start to
				 * the next page to process. Then munlock the
				 * pagevec.
				 */
				start = __munlock_pagevec_fill(&pvec, vma,
						zoneid, start, end);
				__munlock_pagevec(&pvec, zone);
				goto next;
			}
		}
		page_increm = 1 + page_mask;
		start += page_increm * PAGE_SIZE;
next:
		cond_resched();
	}
}

/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);
	vm_flags_t old_flags = vma->vm_flags;

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	else if (old_flags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, populate_vma_page_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}

static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;

		newflags |= flags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

/*
 * Go through the vma areas and sum up the size of the mlocked pages.
 * Note that the deferred memory locking case (mlock2 with MLOCK_ONFAULT)
 * is also counted.
 * Return value: previously mlocked page count
 */
static int count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	int count = 0;

	if (mm == NULL)
		mm = current->mm;

	vma = find_vma(mm, start);
	if (vma == NULL)
		vma = mm->mmap;

	for (; vma ; vma = vma->vm_next) {
		if (start >= vma->vm_end)
			continue;
		if (start + len <=  vma->vm_start)
			break;
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (start + len < vma->vm_end) {
				count += start + len - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}
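
/*
 * A worked example of the accounting above, with hypothetical addresses:
 * one VM_LOCKED vma covers [0x1000, 0x5000) and the request is
 * start = 0x2000, len = 0x2000. In the order the code runs:
 *
 *	count -= 0x2000 - 0x1000;		count = -0x1000
 *	count += 0x2000 + 0x2000 - 0x1000;	count =  0x2000
 *
 * and count >> PAGE_SHIFT yields 2, i.e. exactly the two already-mlocked
 * pages in the requested range (assuming 4K pages).
 */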

static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * It is possible that the regions requested intersect with
		 * previously mlocked areas; that part is already accounted in
		 * "mm->locked_vm" and should not be counted again toward the
		 * new mlock increment. So check and adjust the locked count
		 * if necessary.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	up_write(&current->mm->mmap_sem);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}
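
/*
 * Illustrative userspace use of the syscall above (not part of this file;
 * error handling trimmed). mlock() returns -1 and sets errno, e.g. ENOMEM
 * when RLIMIT_MEMLOCK would be exceeded:
 *
 *	void *buf = malloc(len);
 *	if (mlock(buf, len))
 *		perror("mlock");
 */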

SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}
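
/*
 * Illustrative userspace use of mlock2() with MLOCK_ONFAULT (not part of
 * this file). A libc may not provide a wrapper, in which case syscall(2)
 * can be used directly:
 *
 *	syscall(__NR_mlock2, addr, len, MLOCK_ONFAULT);
 *
 * The range is then locked as it is faulted in, rather than being
 * populated up front.
 */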

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	up_write(&current->mm->mmap_sem);

	return ret;
}

/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * There are a couple of subtleties with this.  If mlockall() is called multiple
 * times with different flags, the values do not necessarily stack.  If mlockall
 * is called once including the MCL_FUTURE flag and then a second time without
 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
 */
static int apply_mlockall_flags(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
		newflags |= to_add;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
		cond_resched_rcu_qs();
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)))
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	if (flags & MCL_CURRENT)
		lru_add_drain_all();	/* flush pagevec */

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	up_write(&current->mm->mmap_sem);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}
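
/*
 * Illustrative userspace use (not part of this file): a real-time process
 * typically locks both its current and all future mappings:
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE))
 *		perror("mlockall");
 */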

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}
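
/*
 * Illustrative path into user_shm_lock() (a sketch for orientation only;
 * it is reached via ipc/shm.c rather than called directly from userspace):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);
 *
 * The locked pages are then charged to the locking user's user_struct
 * instead of to any mm->locked_vm.
 */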

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}