/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

#include "internal.h"

int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked. So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 *  LRU accounting for clear_page_mlock()
 */
void clear_page_mlock(struct page *page)
{
	if (!TestClearPageMlocked(page))
		return;

	mod_zone_page_state(page_zone(page), NR_MLOCK,
			    -hpage_nr_pages(page));
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race. The page already moved to the evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/*
 * Finish munlock after successful page isolation
 *
 * Page must be locked. This is a wrapper for try_to_munlock()
 * and putback_lru_page() with munlock accounting.
 */
static void __munlock_isolated_page(struct page *page)
{
	int ret = SWAP_AGAIN;

	/*
	 * Optimization: if the page was mapped just once, that's our mapping
	 * and we don't need to check all the other vmas.
	 */
	if (page_mapcount(page) > 1)
		ret = try_to_munlock(page);

	/* Did try_to_munlock() succeed or punt? */
	if (ret != SWAP_MLOCK)
		count_vm_event(UNEVICTABLE_PGMUNLOCKED);

	putback_lru_page(page);
}

/*
 * Accounting for page isolation fail during munlock
 *
 * Performs accounting when page isolation fails in munlock. There is nothing
 * else to do because it means some other task has already removed the page
 * from the LRU. putback_lru_page() will take care of removing the page from
 * the unevictable list, if necessary. vmscan [page_referenced()] will move
 * the page back to the unevictable list if some other vma has it mlocked.
 */
static void __munlock_isolation_failed(struct page *page)
{
	if (PageUnevictable(page))
		count_vm_event(UNEVICTABLE_PGSTRANDED);
	else
		count_vm_event(UNEVICTABLE_PGMUNLOCKED);
}

/**
 * munlock_vma_page - munlock a vma page
 * @page - page to be unlocked
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
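 *
 * Returns the page mask (hpage_nr_pages(page) - 1 if the page was Mlocked,
 * otherwise 0) so that munlock_vma_pages_range() can step over the tail
 * pages of a THP in a single iteration.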
 */
unsigned int munlock_vma_page(struct page *page)
{
	unsigned int page_mask = 0;

	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page)) {
		unsigned int nr_pages = hpage_nr_pages(page);
		mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		page_mask = nr_pages - 1;
		if (!isolate_lru_page(page))
			__munlock_isolated_page(page);
		else
			__munlock_isolation_failed(page);
	}

	return page_mask;
}

/**
 * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 *
 * This takes care of making the pages present too.
 *
 * Returns the number of pages faulted in, or a negative error code on failure.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
long __mlock_vma_pages_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end   > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * convert get_user_pages() return value to posix mlock() error
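 *
 * POSIX expects ENOMEM when part of the range is not mapped and EAGAIN when
 * the pages could not be locked, so remap the get_user_pages() error codes.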
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

/*
 * Munlock a batch of pages from the same zone
 *
 * The work is split to two main phases. First phase clears the Mlocked flag
 * and attempts to isolate the pages, all under a single zone lru lock.
 * The second phase finishes the munlock only for pages where isolation
 * succeeded.
 *
 * Note that pvec is modified during the process. Before returning
 * pagevec_reinit() is called on it.
 */
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
	int i;
	int nr = pagevec_count(pvec);
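	/* assume all pages will be munlocked; skipped ones are added back below */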
	int delta_munlocked = -nr;

	/* Phase 1: page isolation */
	spin_lock_irq(&zone->lru_lock);
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (TestClearPageMlocked(page)) {
			struct lruvec *lruvec;
			int lru;

			if (PageLRU(page)) {
				lruvec = mem_cgroup_page_lruvec(page, zone);
				lru = page_lru(page);

				get_page(page);
				ClearPageLRU(page);
				del_page_from_lru_list(page, lruvec, lru);
			} else {
				__munlock_isolation_failed(page);
				goto skip_munlock;
			}

		} else {
skip_munlock:
			/*
			 * We won't be munlocking this page in the next phase
			 * but we still need to release the follow_page_mask()
			 * pin.
			 */
			pvec->pages[i] = NULL;
			put_page(page);
			delta_munlocked++;
		}
	}
	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
	spin_unlock_irq(&zone->lru_lock);

	/* Phase 2: page munlock and putback */
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (page) {
			lock_page(page);
			__munlock_isolated_page(page);
			unlock_page(page);
			put_page(page); /* pin from follow_page_mask() */
		}
	}
	pagevec_reinit(pvec);
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 *  For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	struct pagevec pvec;
	struct zone *zone = NULL;

	pagevec_init(&pvec, 0);
	vma->vm_flags &= ~VM_LOCKED;

	while (start < end) {
		struct page *page;
		unsigned int page_mask, page_increm;
		struct zone *pagezone;

		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
					&page_mask);
		if (page && !IS_ERR(page)) {
			pagezone = page_zone(page);
			/* The whole pagevec must be in the same zone */
			if (pagezone != zone) {
				if (pagevec_count(&pvec))
					__munlock_pagevec(&pvec, zone);
				zone = pagezone;
			}
			if (PageTransHuge(page)) {
				/*
				 * THP pages are not handled by pagevec due
				 * to their possible split (see below).
				 */
				if (pagevec_count(&pvec))
					__munlock_pagevec(&pvec, zone);
				lock_page(page);
				/*
				 * Any THP page found by follow_page_mask() may
				 * have gotten split before reaching
				 * munlock_vma_page(), so we need to recompute
				 * the page_mask here.
				 */
				page_mask = munlock_vma_page(page);
				unlock_page(page);
				put_page(page); /* follow_page_mask() */
			} else {
				/*
				 * Non-huge pages are handled in batches via
				 * pagevec. The pin from follow_page_mask()
				 * prevents them from being collapsed into
				 * a THP.
				 */
				if (pagevec_add(&pvec, page) == 0)
					__munlock_pagevec(&pvec, zone);
			}
		}
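		/*
		 * Move on to the next page: for a THP, page_mask lets us
		 * advance past the remaining tail pages in one step.
		 */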
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		start += page_increm * PAGE_SIZE;
		cond_resched();
	}
	if (pagevec_count(&pvec))
		__munlock_pagevec(&pvec, zone);
}

/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
		goto out;	/* don't set VM_LOCKED,  don't count */

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}

static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags & ~VM_LOCKED;
		if (on)
			newflags |= VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
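	/*
	 * locked tracks whether mmap_sem is still held: __get_user_pages()
	 * may drop it while faulting and reports that via the nonblocking
	 * argument (&locked), so we retake the lock on the next iteration.
	 */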
	int locked = 0;
	long ret = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. __mlock_vma_pages_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			ret = __mlock_posix_error_return(ret);
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	if (!error)
		error = __mm_populate(start, len, 0);
	return error;
}

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

static int do_mlockall(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;

	if (flags & MCL_FUTURE)
		current->mm->def_flags |= VM_LOCKED;
	else
		current->mm->def_flags &= ~VM_LOCKED;
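	/* MCL_FUTURE alone only changes def_flags; existing vmas are left alone */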
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & ~VM_LOCKED;
		if (flags & MCL_CURRENT)
			newflags |= VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	if (flags & MCL_CURRENT)
		lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);
out:
	return ret;
}

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}