/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 only as published by the
 * Free Software Foundation.
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or cache
 * failure.
 *
 * In addition there is a "soft offline" entry point that allows the kernel
 * to stop using not-yet-corrupted but suspicious pages without killing
 * anything.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * There are several operations here with exponential complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list and
 * has non-linear complexity with the number of processes. But since memory
 * corruptions are rare we hope to get away with this. This avoids impacting
 * the core VM.
 */

/*
 * Notebook:
 * - hugetlb needs more code
 * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
 * - pass bad pages to kdump next kernel
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include "internal.h"

int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);

#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);

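/*
 * Filter check: only handle poison on pages backed by the configured
 * block device (selected by major/minor, typically set through the
 * hwpoison-inject debugfs knobs).  A value of ~0U means "don't care".
 */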
static int hwpoison_filter_dev(struct page *p)
{
	struct address_space *mapping;
	dev_t dev;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)
		return 0;

	/*
	 * page_mapping() does not accept slab pages.
	 */
	if (PageSlab(p))
		return -EINVAL;

	mapping = page_mapping(p);
	if (mapping == NULL || mapping->host == NULL)
		return -EINVAL;

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
		return -EINVAL;
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
		return -EINVAL;

	return 0;
}

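/*
 * Filter check: only handle poison on pages whose stable_page_flags() bits
 * match the configured mask/value pair; a zero mask disables this check.
 */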
static int hwpoison_filter_flags(struct page *p)
{
	if (!hwpoison_filter_flags_mask)
		return 0;

	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
				    hwpoison_filter_flags_value)
		return 0;
	else
		return -EINVAL;
}

/*
 * This allows stress tests to limit test scope to a collection of tasks
 * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (eg. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Lastly, due to race conditions this filter
 * can only guarantee that the page either belongs to the memcg tasks, or is
 * a freed page.
 */
#ifdef	CONFIG_CGROUP_MEM_RES_CTLR_SWAP
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
	struct mem_cgroup *mem;
	struct cgroup_subsys_state *css;
	unsigned long ino;

	if (!hwpoison_filter_memcg)
		return 0;

	mem = try_get_mem_cgroup_from_page(p);
	if (!mem)
		return -EINVAL;

	css = mem_cgroup_css(mem);
	/* root_mem_cgroup has NULL dentries */
	if (!css->cgroup->dentry)
		return -EINVAL;

	ino = css->cgroup->dentry->d_inode->i_ino;
	css_put(css);

	if (ino != hwpoison_filter_memcg)
		return -EINVAL;

	return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif

int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	if (hwpoison_filter_dev(p))
		return -EINVAL;

	if (hwpoison_filter_flags(p))
		return -EINVAL;

	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}
#else
int hwpoison_filter(struct page *p)
{
	return 0;
}
#endif

EXPORT_SYMBOL_GPL(hwpoison_filter);

/*
 * Send all the processes who have the page mapped an ``action optional''
 * signal.
 */
static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
			unsigned long pfn, struct page *page)
{
	struct siginfo si;
	int ret;

	printk(KERN_ERR
		"MCE %#lx: Killing %s:%d early due to hardware memory corruption\n",
		pfn, t->comm, t->pid);
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_MCEERR_AO;
	si.si_addr = (void *)addr;
#ifdef __ARCH_SI_TRAPNO
	si.si_trapno = trapno;
#endif
	si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
	/*
	 * Don't use force here, it's convenient if the signal
	 * can be temporarily blocked.
	 * This could cause a loop when the user sets SIGBUS
	 * to SIG_IGN, but hopefully no one will do that?
	 */
	ret = send_sig_info(SIGBUS, &si, t);  /* synchronous? */
	if (ret < 0)
		printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
		       t->comm, t->pid, ret);
	return ret;
}

/*
 * When an unknown page type is encountered drain as many buffers as possible
 * in the hope of turning the page into an LRU or free page, which we can handle.
 */
void shake_page(struct page *p, int access)
{
	if (!PageSlab(p)) {
		lru_add_drain_all();
		if (PageLRU(p))
			return;
		drain_all_pages();
		if (PageLRU(p) || is_free_buddy_page(p))
			return;
	}

	/*
	 * Only call shrink_slab here (which would also shrink
	 * other caches) if access is not potentially fatal.
	 */
	if (access) {
		int nr;
		do {
			nr = shrink_slab(1000, GFP_KERNEL, 1000);
			if (page_count(p) == 1)
				break;
		} while (nr > 10);
	}
}
EXPORT_SYMBOL_GPL(shake_page);

/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handle it.
 */

struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	char addr_valid;
};

/*
 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do.	We just print a message and ignore otherwise.
 */

/*
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 * TBD would GFP_NOIO be enough?
 */
static void add_to_kill(struct task_struct *tsk, struct page *p,
		       struct vm_area_struct *vma,
		       struct list_head *to_kill,
		       struct to_kill **tkc)
{
	struct to_kill *tk;

	if (*tkc) {
		tk = *tkc;
		*tkc = NULL;
	} else {
		tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
		if (!tk) {
			printk(KERN_ERR
		"MCE: Out of memory while machine check handling\n");
			return;
		}
	}
	tk->addr = page_address_in_vma(p, vma);
	tk->addr_valid = 1;

	/*
	 * In theory we don't have to kill when the page was
	 * munmapped. But it could also be a mremap. Since that's
	 * likely very rare kill anyway just out of paranoia, but use
	 * a SIGKILL because the error is not contained anymore.
	 */
	if (tk->addr == -EFAULT) {
		pr_info("MCE: Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
		tk->addr_valid = 0;
	}
	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}

/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when DOIT is set, otherwise just free the list
 * (this is used for clean pages which do not need killing).
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
			  int fail, struct page *page, unsigned long pfn)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe (tk, next, to_kill, nd) {
		if (doit) {
			/*
			 * In case something went wrong with munmapping
			 * make sure the process doesn't catch the
			 * signal and then access the memory. Just kill it.
			 */
			if (fail || tk->addr_valid == 0) {
				printk(KERN_ERR
		"MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
				force_sig(SIGKILL, tk->tsk);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyways.
			 */
			else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
					      pfn, page) < 0)
				printk(KERN_ERR
		"MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
		}
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}

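/*
 * Decide whether a task should be killed as soon as the corruption is
 * detected ("early kill") rather than only when it actually consumes the
 * data. Per-process PF_MCE_PROCESS/PF_MCE_EARLY flags (set via prctl)
 * override the global sysctl_memory_failure_early_kill setting.
 */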
static int task_early_kill(struct task_struct *tsk)
{
	if (!tsk->mm)
		return 0;
	if (tsk->flags & PF_MCE_PROCESS)
		return !!(tsk->flags & PF_MCE_EARLY);
	return sysctl_memory_failure_early_kill;
}

/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
			      struct to_kill **tkc)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct anon_vma *av;

	read_lock(&tasklist_lock);
	av = page_lock_anon_vma(page);
	if (av == NULL)	/* Not actually mapped anymore */
		goto out;
	for_each_process (tsk) {
		struct anon_vma_chain *vmac;

		if (!task_early_kill(tsk))
			continue;
		list_for_each_entry(vmac, &av->head, same_anon_vma) {
			vma = vmac->vma;
			if (!page_mapped_in_vma(page, vma))
				continue;
			if (vma->vm_mm == tsk->mm)
				add_to_kill(tsk, page, vma, to_kill, tkc);
		}
	}
	page_unlock_anon_vma(av);
out:
	read_unlock(&tasklist_lock);
}

/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(struct page *page, struct list_head *to_kill,
			      struct to_kill **tkc)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct prio_tree_iter iter;
	struct address_space *mapping = page->mapping;

	/*
	 * A note on the locking order between the two locks.
	 * We don't rely on this particular order.
	 * If you have some other code that needs a different order
	 * feel free to switch them around. Or add a reverse link
	 * from mm_struct to task_struct, then this could be all
	 * done without taking tasklist_lock and looping over all tasks.
	 */

	read_lock(&tasklist_lock);
	spin_lock(&mapping->i_mmap_lock);
	for_each_process(tsk) {
		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

		if (!task_early_kill(tsk))
			continue;

		vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff,
				      pgoff) {
			/*
			 * Send early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
			 * Assume applications who requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm == tsk->mm)
				add_to_kill(tsk, page, vma, to_kill, tkc);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
	read_unlock(&tasklist_lock);
}

/*
 * Collect the processes who have the corrupted page mapped to kill.
 * This is done in two steps for locking reasons.
 * First preallocate one tokill structure outside the spin locks,
 * so that we can kill at least one process reasonably reliable.
 */
static void collect_procs(struct page *page, struct list_head *tokill)
{
	struct to_kill *tk;

	if (!page->mapping)
		return;

	tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
	if (!tk)
		return;
	if (PageAnon(page))
		collect_procs_anon(page, tokill, &tk);
	else
		collect_procs_file(page, tokill, &tk);
	kfree(tk);
}

/*
 * Error handlers for various types of pages.
 */

enum outcome {
	IGNORED,	/* Error: cannot be handled */
	FAILED,		/* Error: handling failed */
	DELAYED,	/* Will be handled later */
	RECOVERED,	/* Successfully recovered */
};

static const char *action_name[] = {
	[IGNORED] = "Ignored",
	[FAILED] = "Failed",
	[DELAYED] = "Delayed",
	[RECOVERED] = "Recovered",
};

/*
 * XXX: It is possible that a page is isolated from LRU cache,
 * and then kept in swap cache or fails to be removed from page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct page *p)
{
	if (!isolate_lru_page(p)) {
		/*
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the page is unpoison-and-freed.
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);
		/*
		 * drop the page count elevated by isolate_lru_page()
		 */
		page_cache_release(p);
		return 0;
	}
	return -EIO;
}

/*
 * Error hit kernel page.
 * Do nothing, try to be lucky and not touch this instead. For a few cases we
 * could be more sophisticated.
 */
static int me_kernel(struct page *p, unsigned long pfn)
{
	return IGNORED;
}

/*
 * Page in unknown state. Do nothing.
 */
static int me_unknown(struct page *p, unsigned long pfn)
{
	printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
	return FAILED;
}

/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
	int err;
	int ret = FAILED;
	struct address_space *mapping;

	delete_from_lru_cache(p);

	/*
	 * For anonymous pages we're done; the only reference left
	 * should be the one m_f() holds.
	 */
	if (PageAnon(p))
		return RECOVERED;

	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch"
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
	mapping = page_mapping(p);
	if (!mapping) {
		/*
		 * Page has been torn down in the meantime
		 */
		return FAILED;
	}

	/*
	 * Truncation is a bit tricky. Enable it per file system for now.
	 *
	 * Open: to take i_mutex or not for this? Right now we don't.
	 */
	if (mapping->a_ops->error_remove_page) {
		err = mapping->a_ops->error_remove_page(mapping, p);
		if (err != 0) {
			printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
					pfn, err);
		} else if (page_has_private(p) &&
				!try_to_release_page(p, GFP_NOIO)) {
			pr_info("MCE %#lx: failed to release buffers\n", pfn);
		} else {
			ret = RECOVERED;
		}
	} else {
		/*
		 * If the file system doesn't support it just invalidate
		 * This fails on dirty or anything with private pages
		 */
		if (invalidate_inode_page(p))
			ret = RECOVERED;
		else
			printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
				pfn);
	}
	return ret;
}

/*
 * Dirty page cache page
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page *p, unsigned long pfn)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);
	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO error will be reported by write(), fsync(), etc.
		 * who check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will be only reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass IO error
		 * first through the AS_EIO flag in the address space
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling the
		 * only mechanism open to use is through AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped.  If an
		 * application assumes it will always get error on
		 * fsync, but does other operations on the fd before
		 * and the page is dropped in between then the error
		 * will not be properly reported.
		 *
		 * This can already happen even without hwpoisoned
		 * pages: first on metadata IO errors (which only
		 * report through AS_EIO) or when the page is dropped
		 * at the wrong time.
		 *
		 * So right now we assume that the application DTRT on
		 * the first EIO, but we're not worse than other parts
		 * of the kernel.
		 */
		mapping_set_error(mapping, EIO);
	}

	return me_pagecache_clean(p, pfn);
}

/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (i.e. page is freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *      - clear dirty bit to prevent IO
 *      - remove from LRU
 *      - but keep in the swap cache, so that when we return to it on
 *        a later page fault, we know the application is accessing
 *        corrupted data and shall be killed (we installed simple
 *        interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	if (!delete_from_lru_cache(p))
		return DELAYED;
	else
		return FAILED;
}

static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
	delete_from_swap_cache(p);

	if (!delete_from_lru_cache(p))
		return RECOVERED;
	else
		return FAILED;
}

/*
 * Huge pages. Needs work.
 * Issues:
 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
 *   To narrow down kill region to one page, we need to break up pmd.
 * - To support soft-offlining for hugepage, we need to support hugepage
 *   migration.
 */
static int me_huge_page(struct page *p, unsigned long pfn)
{
	struct page *hpage = compound_head(p);
	/*
	 * We can safely recover from error on free or reserved (i.e.
	 * not in-use) hugepage by dequeuing it from freelist.
	 * To check whether a hugepage is in-use or not, we can't use
	 * page->lru because it can be used in other hugepage operations,
	 * such as __unmap_hugepage_range() and gather_surplus_pages().
	 * So instead we use page_mapping() and PageAnon().
	 * We assume that this function is called with page lock held,
	 * so there is no race between isolation and mapping/unmapping.
	 */
	if (!(page_mapping(hpage) || PageAnon(hpage))) {
		__isolate_hwpoisoned_huge_page(hpage);
		return RECOVERED;
	}
	return DELAYED;
}

/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access the page at any time
 * in its life cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty		(1UL << PG_dirty)
#define sc		(1UL << PG_swapcache)
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define writeback	(1UL << PG_writeback)
#define lru		(1UL << PG_lru)
#define swapbacked	(1UL << PG_swapbacked)
#define head		(1UL << PG_head)
#define tail		(1UL << PG_tail)
#define compound	(1UL << PG_compound)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)

static struct page_state {
	unsigned long mask;
	unsigned long res;
	char *msg;
	int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
	{ reserved,	reserved,	"reserved kernel",	me_kernel },
	/*
	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make a small fraction of all free pages.
	 */

	/*
	 * Could in theory check if slab page is free or if we can drop
	 * currently unused objects without touching them. But just
	 * treat it as standard kernel for now.
	 */
	{ slab,		slab,		"kernel slab",	me_kernel },

#ifdef CONFIG_PAGEFLAGS_EXTENDED
	{ head,		head,		"huge",		me_huge_page },
	{ tail,		tail,		"huge",		me_huge_page },
#else
	{ compound,	compound,	"huge",		me_huge_page },
#endif

	{ sc|dirty,	sc|dirty,	"swapcache",	me_swapcache_dirty },
	{ sc|dirty,	sc,		"swapcache",	me_swapcache_clean },

	{ unevict|dirty, unevict|dirty,	"unevictable LRU", me_pagecache_dirty},
	{ unevict,	unevict,	"unevictable LRU", me_pagecache_clean},

	{ mlock|dirty,	mlock|dirty,	"mlocked LRU",	me_pagecache_dirty },
	{ mlock,	mlock,		"mlocked LRU",	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	"LRU",		me_pagecache_dirty },
	{ lru|dirty,	lru,		"clean LRU",	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		"unknown page state",	me_unknown },
};

#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef writeback
#undef lru
#undef swapbacked
#undef head
#undef tail
#undef compound
#undef slab
#undef reserved

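/*
 * Log the outcome of handling the page at @pfn, prefixed with "dirty "
 * when the struct page still has the dirty bit set.
 */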
static void action_result(unsigned long pfn, char *msg, int result)
{
	struct page *page = pfn_to_page(pfn);

	printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n",
		pfn,
		PageDirty(page) ? "dirty " : "",
		msg, action_name[result]);
}

static int page_action(struct page_state *ps, struct page *p,
			unsigned long pfn)
{
	int result;
	int count;

	result = ps->action(p, pfn);
	action_result(pfn, ps->msg, result);

	count = page_count(p) - 1;
	if (ps->action == me_swapcache_dirty && result == DELAYED)
		count--;
	if (count != 0) {
		printk(KERN_ERR
		       "MCE %#lx: %s page still referenced by %d users\n",
		       pfn, ps->msg, count);
		result = FAILED;
	}

	/* Could do more checks here if page looks ok */
	/*
	 * Could adjust zone counters here to correct for the missing page.
	 */

	return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
}

/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
				  int trapno)
{
	enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
	struct address_space *mapping;
	LIST_HEAD(tokill);
	int ret;
	int kill = 1;
	struct page *hpage = compound_head(p);

	if (PageReserved(p) || PageSlab(p))
		return SWAP_SUCCESS;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!page_mapped(hpage))
		return SWAP_SUCCESS;

	if (PageKsm(p))
		return SWAP_FAIL;

	if (PageSwapCache(p)) {
		printk(KERN_ERR
		       "MCE %#lx: keeping poisoned page in swap cache\n", pfn);
		ttu |= TTU_IGNORE_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside page lock (it's recommended but not enforced).
	 */
	mapping = page_mapping(hpage);
	if (!PageDirty(hpage) && mapping &&
	    mapping_cap_writeback_dirty(mapping)) {
		if (page_mkclean(hpage)) {
			SetPageDirty(hpage);
		} else {
			kill = 0;
			ttu |= TTU_IGNORE_HWPOISON;
			printk(KERN_INFO
	"MCE %#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	/*
	 * First collect all the processes that have the page
	 * mapped in dirty form.  This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 *
	 * Error handling: We ignore errors here because
	 * there's nothing that can be done.
	 */
	if (kill)
		collect_procs(hpage, &tokill);

	ret = try_to_unmap(hpage, ttu);
	if (ret != SWAP_SUCCESS)
		printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
				pfn, page_mapcount(hpage));

	/*
	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps done we can decide if
	 * killing is needed or not.  Only kill when the page
	 * was dirty, otherwise the tokill list is merely
	 * freed.  When there was a problem unmapping earlier
	 * use a more forceful uncatchable kill to prevent
	 * any accesses to the poisoned memory.
	 */
	kill_procs_ao(&tokill, !!PageDirty(hpage), trapno,
		      ret != SWAP_SUCCESS, p, pfn);

	return ret;
}

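/*
 * A hugepage is poisoned (and unpoisoned) as a unit: these helpers set or
 * clear PG_hwpoison on every subpage of the compound page so that any later
 * lookup through an individual struct page sees the same state.
 */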
static void set_page_hwpoison_huge_page(struct page *hpage)
{
	int i;
	int nr_pages = 1 << compound_order(hpage);
	for (i = 0; i < nr_pages; i++)
		SetPageHWPoison(hpage + i);
}

static void clear_page_hwpoison_huge_page(struct page *hpage)
{
	int i;
	int nr_pages = 1 << compound_order(hpage);
	for (i = 0; i < nr_pages; i++)
		ClearPageHWPoison(hpage + i);
}

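/*
 * Core recovery entry point. Callers that already hold a reference on the
 * page (such as the madvise based injector) pass MF_COUNT_INCREASED in
 * @flags.
 */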
int __memory_failure(unsigned long pfn, int trapno, int flags)
{
	struct page_state *ps;
	struct page *p;
	struct page *hpage;
	int res;
	unsigned int nr_pages;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure from trap %d on page %lx", trapno, pfn);

	if (!pfn_valid(pfn)) {
		printk(KERN_ERR
		       "MCE %#lx: memory outside kernel control\n",
		       pfn);
		return -ENXIO;
	}

	p = pfn_to_page(pfn);
	hpage = compound_head(p);
	if (TestSetPageHWPoison(p)) {
		printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
		return 0;
	}

	nr_pages = 1 << compound_order(hpage);
	atomic_long_add(nr_pages, &mce_bad_pages);

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hand:
	 *    prep_new_page() will be the gate keeper.
	 * 2) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up page count from 0,
	 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED) &&
		!get_page_unless_zero(hpage)) {
		if (is_free_buddy_page(p)) {
			action_result(pfn, "free buddy", DELAYED);
			return 0;
		} else {
			action_result(pfn, "high order kernel", IGNORED);
			return -EBUSY;
		}
991 992
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __set_page_locked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	if (!PageLRU(p) && !PageHuge(p))
		shake_page(p, 0);
	if (!PageLRU(p) && !PageHuge(p)) {
		/*
		 * shake_page could have turned it free.
		 */
		if (is_free_buddy_page(p)) {
			action_result(pfn, "free buddy, 2nd try", DELAYED);
			return 0;
		}
		action_result(pfn, "non LRU", IGNORED);
		put_page(p);
		return -EBUSY;
	}

	/*
	 * Lock the page and wait for writeback to finish.
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.
	 */
	lock_page_nosync(hpage);

	/*
	 * unpoison always clear PG_hwpoison inside page lock
	 */
	if (!PageHWPoison(p)) {
		printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
		res = 0;
		goto out;
	}
	if (hwpoison_filter(p)) {
		if (TestClearPageHWPoison(p))
			atomic_long_sub(nr_pages, &mce_bad_pages);
		unlock_page(hpage);
		put_page(hpage);
		return 0;
	}

	/*
	 * For error on the tail page, we should set PG_hwpoison
	 * on the head page to show that the hugepage is hwpoisoned
	 */
	if (PageTail(p) && TestSetPageHWPoison(hpage)) {
		action_result(pfn, "hugepage already hardware poisoned",
				IGNORED);
		unlock_page(hpage);
		put_page(hpage);
		return 0;
	}
	/*
	 * Set PG_hwpoison on all pages in an error hugepage,
	 * because containment is done in hugepage unit for now.
	 * Since we have done TestSetPageHWPoison() for the head page with
	 * page lock held, we can safely set PG_hwpoison bits on tail pages.
	 */
	if (PageHuge(p))
		set_page_hwpoison_huge_page(hpage);

	wait_on_page_writeback(p);

	/*
	 * Now take care of user space mappings.
W
1064
	 */
W
		printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
		res = -EBUSY;
		goto out;
	}

	/*
	 * Torn down by someone else?
	 */
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		action_result(pfn, "already truncated LRU", IGNORED);
		res = -EBUSY;
		goto out;
	}

	res = -EBUSY;
	for (ps = error_states;; ps++) {
		if ((p->flags & ps->mask) == ps->res) {
			res = page_action(ps, p, pfn);
			break;
		}
	}
out:
	unlock_page(hpage);
	return res;
}
EXPORT_SYMBOL_GPL(__memory_failure);

/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @trapno: Trap number reported in the signal to user space.
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber).
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 */
void memory_failure(unsigned long pfn, int trapno)
{
	__memory_failure(pfn, trapno, 0);
}
W
/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the to be unpoisoned page
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software level, so it only works
 * for Linux-injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct page *page;
	struct page *p;
	int freeit = 0;
	unsigned int nr_pages;

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	page = compound_head(p);

	if (!PageHWPoison(p)) {
		pr_info("MCE: Page was already unpoisoned %#lx\n", pfn);
		return 0;
	}

	nr_pages = 1 << compound_order(page);

	if (!get_page_unless_zero(page)) {
		if (TestClearPageHWPoison(p))
			atomic_long_sub(nr_pages, &mce_bad_pages);
		pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
		return 0;
	}

	lock_page_nosync(page);
	/*
	 * This test is racy because PG_hwpoison is set outside of page lock.
	 * That's acceptable because that won't trigger kernel panic. Instead,
	 * the PG_hwpoison page will be caught and isolated on the entrance to
	 * the free buddy page pool.
	 */
	if (TestClearPageHWPoison(page)) {
		pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
		atomic_long_sub(nr_pages, &mce_bad_pages);
		freeit = 1;
	}
	if (PageHuge(p))
		clear_page_hwpoison_huge_page(page);
	unlock_page(page);

	put_page(page);
	if (freeit)
		put_page(page);

	return 0;
}
EXPORT_SYMBOL(unpoison_memory);

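/*
 * Allocation callback for migrate_pages(): soft offline migrates the content
 * of the suspect page to a freshly allocated page on the same node before
 * taking the old page out of service.
 */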
static struct page *new_page(struct page *p, unsigned long private, int **x)
{
	int nid = page_to_nid(p);
	return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Safely get reference count of an arbitrary page.
 * Returns 0 for a free page, -EIO for a zero refcount page
 * that is not free, and 1 for any other page type.
 * For 1 the page is returned with increased page count, otherwise not.
 */
static int get_any_page(struct page *p, unsigned long pfn, int flags)
{
	int ret;

	if (flags & MF_COUNT_INCREASED)
		return 1;

	/*
	 * The lock_system_sleep prevents a race with memory hotplug,
	 * because the isolation assumes there's only a single user.
 * This is a big hammer; a better solution would be nicer.
	 */
	lock_system_sleep();

	/*
	 * Isolate the page, so that it doesn't get reallocated if it
	 * was free.
	 */
	set_migratetype_isolate(p);
	if (!get_page_unless_zero(compound_head(p))) {
		if (is_free_buddy_page(p)) {
			pr_info("get_any_page: %#lx free buddy page\n", pfn);
			/* Set hwpoison bit while page is still isolated */
			SetPageHWPoison(p);
			ret = 0;
		} else {
			pr_info("get_any_page: %#lx: unknown zero refcount page type %lx\n",
				pfn, p->flags);
			ret = -EIO;
		}
	} else {
		/* Not a free page */
		ret = 1;
	}
	unset_migratetype_isolate(p);
	unlock_system_sleep();
	return ret;
}

/**
 * soft_offline_page - Soft offline a page.
 * @page: page to offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success, otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);

	ret = get_any_page(page, pfn, flags);
	if (ret < 0)
		return ret;
	if (ret == 0)
		goto done;

	/*
	 * Page cache page we can handle?
	 */
	if (!PageLRU(page)) {
		/*
		 * Try to free it.
		 */
		put_page(page);
		shake_page(page, 1);

		/*
		 * Did it turn free?
		 */
		ret = get_any_page(page, pfn, 0);
		if (ret < 0)
			return ret;
		if (ret == 0)
			goto done;
	}
	if (!PageLRU(page)) {
		pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
				pfn, page->flags);
		return -EIO;
	}

	lock_page(page);
	wait_on_page_writeback(page);

	/*
	 * Synchronized using the page lock with memory_failure()
	 */
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return -EBUSY;
	}

	/*
	 * Try to invalidate first. This should work for
	 * non dirty unmapped page cache pages.
	 */
	ret = invalidate_inode_page(page);
	unlock_page(page);

	/*
	 * Drop count because page migration doesn't like raised
	 * counts. The page could get re-allocated, but if it becomes
	 * LRU the isolation will just fail.
	 * RED-PEN would be better to keep it isolated here, but we
	 * would need to fix isolation locking first.
	 */
	put_page(page);
	if (ret == 1) {
		ret = 0;
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		goto done;
	}

	/*
	 * Simple invalidation didn't work.
	 * Try to migrate to a new page instead. migrate.c
	 * handles a large number of cases for us.
	 */
	ret = isolate_lru_page(page);
	if (!ret) {
		LIST_HEAD(pagelist);

		list_add(&page->lru, &pagelist);
		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
		if (ret) {
			pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
				pfn, ret, page->flags);
			if (ret > 0)
				ret = -EIO;
		}
	} else {
		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
				pfn, ret, page_count(page), page->flags);
	}
	if (ret)
		return ret;

done:
	atomic_long_add(1, &mce_bad_pages);
	SetPageHWPoison(page);
	/* keep elevated page count for bad page */
	return ret;
}

/*
 * The caller must hold current->mm->mmap_sem in read mode.
 */
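/*
 * Walk the page tables for @addr and report whether the address is currently
 * mapped by a hwpoison swap entry, i.e. whether the page behind it has
 * already been poisoned and unmapped.
 */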
int is_hwpoison_address(unsigned long addr)
{
	pgd_t *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t pte, *ptep;
	swp_entry_t entry;

	pgdp = pgd_offset(current->mm, addr);
	if (!pgd_present(*pgdp))
		return 0;
	pudp = pud_offset(pgdp, addr);
	pud = *pudp;
	if (!pud_present(pud) || pud_large(pud))
		return 0;
	pmdp = pmd_offset(pudp, addr);
	pmd = *pmdp;
	if (!pmd_present(pmd) || pmd_large(pmd))
		return 0;
	ptep = pte_offset_map(pmdp, addr);
	pte = *ptep;
	pte_unmap(ptep);
	if (!is_swap_pte(pte))
		return 0;
	entry = pte_to_swp_entry(pte);
	return is_hwpoison_entry(entry);
}
EXPORT_SYMBOL_GPL(is_hwpoison_address);