Commit fd6a03ed, authored by Naoya Horiguchi, committed by Andi Kleen

HWPOISON, hugetlb: detect hwpoison in hugetlb code

This patch blocks access to a hwpoisoned hugepage and also blocks unmapping of it.

Dependency:
  "HWPOISON, hugetlb: enable error handling path for hugepage"
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Fengguang Wu <fengguang.wu@intel.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Parent 93f70f90
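For background (not part of this patch): the "hwpoison swap entry" used below is a software-defined non-swap entry from include/linux/swapops.h. A simplified sketch of the relevant helpers, assuming CONFIG_MEMORY_FAILURE is enabled (details can vary between kernel versions):

/* Simplified sketch of the hwpoison entry helpers in include/linux/swapops.h;
 * sanity checks present in the real code are omitted. */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	/* the entry records the pfn of the poisoned page */
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline int non_swap_entry(swp_entry_t entry)
{
	/* true for software entries (migration, hwpoison) that are not real swap */
	return swp_type(entry) >= MAX_SWAPFILES;
}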
@@ -19,6 +19,8 @@
 #include <linux/sysfs.h>
 #include <linux/slab.h>
 #include <linux/rmap.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -2149,6 +2151,19 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	return -ENOMEM;
 }
 
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+	swp_entry_t swp;
+
+	if (huge_pte_none(pte) || pte_present(pte))
+		return 0;
+	swp = pte_to_swp_entry(pte);
+	if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
+		return 1;
+	} else
+		return 0;
+}
+
 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end, struct page *ref_page)
 {
@@ -2207,6 +2222,12 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 		if (huge_pte_none(pte))
 			continue;
 
+		/*
+		 * HWPoisoned hugepage is already unmapped and dropped reference
+		 */
+		if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+			continue;
+
 		page = pte_page(pte);
 		if (pte_dirty(pte))
 			set_page_dirty(page);
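For context (not part of this patch): the hwpoison entry skipped above is installed by the memory error handler when it unmaps the poisoned hugepage, relying on the dependency patch that routes hugepages through the regular rmap path. A heavily simplified sketch of the relevant branch in try_to_unmap_one() (mm/rmap.c); rss accounting, TLB flushing and rmap bookkeeping are omitted:

/* Sketch only: the PageHWPoison branch of try_to_unmap_one(), simplified. */
if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
	/* leave a hwpoison entry behind so later faults on this
	 * address see the error instead of the page */
	set_pte_at(mm, address, pte,
		   swp_entry_to_pte(make_hwpoison_entry(page)));
}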
@@ -2490,6 +2511,18 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			page_dup_rmap(page);
 	}
 
+	/*
+	 * Since memory error handler replaces pte into hwpoison swap entry
+	 * at the time of error handling, a process which reserved but not have
+	 * the mapping to the error hugepage does not have hwpoison swap entry.
+	 * So we need to block accesses from such a process by checking
+	 * PG_hwpoison bit here.
+	 */
+	if (unlikely(PageHWPoison(page))) {
+		ret = VM_FAULT_HWPOISON;
+		goto backout_unlocked;
+	}
+
 	/*
 	 * If we are going to COW a private mapping later, we examine the
 	 * pending reservations for this page now. This will ensure that
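For context (not part of this patch): PG_hwpoison is a per-page flag set by memory_failure(), so it is visible to every process that finds the page in the page cache, even one that never had a pte for it; that is why the check above tests the struct page rather than the pte. A minimal sketch of how the accessor is declared in include/linux/page-flags.h, simplified:

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison)	/* generates PageHWPoison()/SetPageHWPoison()/... */
#else
PAGEFLAG_FALSE(HWPoison)	/* PageHWPoison() is constant 0 */
#endif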
@@ -2544,6 +2577,13 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
 	struct hstate *h = hstate_vma(vma);
 
+	ptep = huge_pte_offset(mm, address);
+	if (ptep) {
+		entry = huge_ptep_get(ptep);
+		if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
+			return VM_FAULT_HWPOISON;
+	}
+
 	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
 	if (!ptep)
 		return VM_FAULT_OOM;
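A hypothetical user-space check of the end-to-end behaviour this series aims for: after poisoning a mapped hugepage with MADV_HWPOISON, the next access should be refused with SIGBUS rather than reach the bad memory. This sketch assumes an x86 box with 2MB hugepages reserved, MAP_HUGETLB support and root privileges; the fallback constant values are the common asm-generic/x86 ones and are not taken from this patch:

/* Hypothetical test sketch: poison one hugetlb page and expect SIGBUS
 * on the next access. Requires CONFIG_MEMORY_FAILURE, reserved hugepages
 * (e.g. via /proc/sys/vm/nr_hugepages) and root privileges. */
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <setjmp.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB	0x40000		/* x86 value; arch-specific */
#endif
#ifndef MADV_HWPOISON
#define MADV_HWPOISON	100		/* soft memory-error injection */
#endif

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumes 2MB hugepages */

static sigjmp_buf env;

static void sigbus_handler(int sig)
{
	(void)sig;
	siglongjmp(env, 1);
}

int main(void)
{
	volatile char *q;
	char *p;

	p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	memset(p, 0xaa, HPAGE_SIZE);	/* fault the hugepage in */

	if (madvise(p, HPAGE_SIZE, MADV_HWPOISON)) {
		perror("madvise(MADV_HWPOISON)");
		return 1;
	}

	signal(SIGBUS, sigbus_handler);
	q = p;
	if (sigsetjmp(env, 1) == 0) {
		char c = q[0];		/* expected to raise SIGBUS */
		(void)c;
		printf("BUG: poisoned hugepage was still accessible\n");
		return 1;
	}
	printf("OK: access to poisoned hugepage raised SIGBUS\n");
	return 0;
}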