/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, only
 * need to take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}
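
/*
 * For example, MADV_SEQUENTIAL sets VM_SEQ_READ in vma->vm_flags via
 * madvise_behavior(), so it takes the default (write-lock) path above,
 * while MADV_DONTNEED only walks and zaps page tables and can run
 * under the read lock.
 */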

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED) {
			error = -EINVAL;
			goto out;
		}
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, start, 1);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, end, 0);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;
out:
	return error;
}
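
/*
 * Illustrative userspace sketch (assuming 4KiB pages): advising only the
 * middle page of a three-page anonymous mapping splits the vma at both
 * ends, leaving three vmas where there was one:
 *
 *	char *p = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(p + 4096, 4096, MADV_DONTFORK);
 */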

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
							vma, index, false);
		if (page)
			put_page(page);
	}

	return 0;
}

static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!radix_tree_exceptional_entry(page)) {
			if (page)
				put_page(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
							NULL, 0, false);
		if (page)
			put_page(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

#ifdef CONFIG_SWAP
	if (!file) {
		*prev = vma;
		force_swapin_readahead(vma, start, end);
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		*prev = vma;
		force_shm_swapin_readahead(vma, start, end,
					file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}
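
/*
 * Illustrative userspace sketch ("fd" and "len" are placeholders): start
 * readahead on a file-backed mapping before it is touched, so the first
 * accesses are less likely to block on I/O:
 *
 *	void *buf = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	if (buf != MAP_FAILED)
 *		madvise(buf, len, MADV_WILLNEED);
 */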

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)

{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page
		 * table entry: swapping the page back in would be more
		 * expensive than (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = _vm_normal_page(vma, addr, ptent, true);
		if (!page)
			continue;

		/*
		 * If the pmd isn't transhuge but the page is a THP
		 * owned by only this process, split it and
		 * deactivate all of its pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we must not
			 * clear its PG_dirty bit.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the
			 * TLB with set_pte_at() and tlb_remove_tlb_entry(),
			 * so for portability, re-install the pte as
			 * old|clean after clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		mark_page_lazyfree(page);
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static void madvise_free_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct mm_walk free_walk = {
		.pmd_entry = madvise_free_pte_range,
		.mm = vma->vm_mm,
		.private = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(addr, end, &free_walk);
	tlb_end_vma(tlb, vma);
}

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	unsigned long start, end;
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	/* MADV_FREE only works on anonymous vmas at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	start = max(vma->vm_start, start_addr);
	if (start >= vma->vm_end)
		return -EINVAL;
	end = min(vma->vm_end, end_addr);
	if (end <= vma->vm_start)
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(mm, start, end);
	madvise_free_page_range(&tlb, vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	tlb_finish_mmu(&tlb, start, end);

	return 0;
}
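
/*
 * Illustrative userspace sketch ("arena" and "arena_len" are
 * placeholders): an allocator can mark freed chunks of its anonymous
 * arena as lazily reclaimable; the pages stay mapped and their contents
 * remain valid until memory pressure actually reclaims them:
 *
 *	madvise(arena, arena_len, MADV_FREE);
 */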

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start);
	return 0;
}

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	*prev = vma;
	if (!can_madv_dontneed_vma(vma))
		return -EINVAL;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_sem has been dropped, prev is stale */

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end. If start <
			 * vma->vm_start it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		if (!can_madv_dontneed_vma(vma))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_sem was
			 * released, the concurrent split must not
			 * cause madvise() to return an undefined
			 * result. There may be an
			 * adjacent next vma that we'll walk
			 * next. userfaultfd_remove() will generate an
			 * UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}
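
/*
 * Illustrative userspace sketch ("scratch" and "scratch_len" are
 * placeholders): discard a scratch buffer's contents while keeping the
 * mapping; later reads of the anonymous range see zero-fill-on-demand
 * pages:
 *
 *	madvise(scratch, scratch_len, MADV_DONTNEED);
 */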

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host)
		return -EINVAL;

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_sem was not released by userfaultfd_remove() */
		up_read(&current->mm->mmap_sem);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}
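
/*
 * Illustrative userspace sketch ("fd" and "len" are placeholders): punch
 * a hole in a shared, writable file mapping, releasing both the pages
 * and the backing blocks:
 *
 *	void *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	if (map != MAP_FAILED)
 *		madvise(map, len, MADV_REMOVE);
 */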

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	struct page *page;
	struct zone *zone;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += PAGE_SIZE <<
				compound_order(compound_head(page))) {
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;

		if (PageHWPoison(page)) {
			put_page(page);
			continue;
		}

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
						page_to_pfn(page), start);

			ret = soft_offline_page(page, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
						page_to_pfn(page), start);

		ret = memory_failure(page_to_pfn(page), 0, MF_COUNT_INCREASED);
		if (ret)
			return ret;
	}

	/* Ensure that all poisoned pages are removed from per-cpu lists */
	for_each_populated_zone(zone)
		drain_all_pages(zone);

	return 0;
}
#endif
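
/*
 * Illustrative userspace sketch ("page" is a placeholder; requires
 * CAP_SYS_ADMIN and a kernel built with CONFIG_MEMORY_FAILURE): inject
 * a memory error on one page to exercise the memory-failure paths:
 *
 *	madvise(page, getpagesize(), MADV_HWPOISON);
 */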

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the
 *		application will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
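
/*
 * Illustrative userspace sketch ("addr" and "length" are placeholders;
 * "addr" must be page-aligned, and "length" is rounded up to a page
 * boundary):
 *
 *	if (madvise(addr, length, MADV_SEQUENTIAL) != 0)
 *		perror("madvise");
 */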
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from a small negative to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (down_write_killable(&current->mm->mmap_sem))
			return -EINTR;
	} else {
		down_read(&current->mm->mmap_sem);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - this differs from how mlock etc. handle unmapped ranges.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}