commit 41bb3476 authored by Cyrill Gorcunov, committed by Linus Torvalds

mm: save soft-dirty bits on file pages

Andy reported that if a file page gets reclaimed we lose its soft-dirty
bit if it was set, so save the _PAGE_BIT_SOFT_DIRTY bit when the file
offset gets encoded into the pte entry.  Thus when a #PF happens on such
a non-present pte we can restore it back.
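
The behaviour being preserved is observable from user space. The
following stand-alone sketch (illustrative only, not part of the patch)
exercises the documented soft-dirty interface from
Documentation/vm/soft-dirty.txt: writing "4" to /proc/self/clear_refs
resets the soft-dirty bits, and bit 55 of a /proc/self/pagemap entry
reports them for a file-backed page:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static int soft_dirty(void *addr)
{
	uint64_t ent = 0;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	/* one 64-bit entry per page; soft-dirty is bit 55 */
	pread(fd, &ent, sizeof(ent),
	      (uintptr_t)addr / sysconf(_SC_PAGESIZE) * 8);
	close(fd);
	return (ent >> 55) & 1;
}

int main(void)
{
	char tmpl[] = "/tmp/soft-dirty-XXXXXX";
	int fd = mkstemp(tmpl), clear_refs;
	char *map;

	ftruncate(fd, 4096);
	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	clear_refs = open("/proc/self/clear_refs", O_WRONLY);
	write(clear_refs, "4", 1);	/* reset soft-dirty for this mm */
	close(clear_refs);

	map[0] = 1;			/* dirty the file-backed page */
	printf("soft-dirty after store: %d\n", soft_dirty(map));

	munmap(map, 4096);
	close(fd);
	unlink(tmpl);
	return 0;
}
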
Reported-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Cyrill Gorcunov <gorcunov@openvz.org>
Acked-by: Pavel Emelyanov <xemul@parallels.com>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 179ef71c
@@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
 #endif

+#ifdef CONFIG_MEM_SOFT_DIRTY
+
+/*
+ * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and
+ * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset
+ * into this range.
+ */
+#define PTE_FILE_MAX_BITS	28
+#define PTE_FILE_SHIFT1		(_PAGE_BIT_PRESENT + 1)
+#define PTE_FILE_SHIFT2		(_PAGE_BIT_FILE + 1)
+#define PTE_FILE_SHIFT3		(_PAGE_BIT_PROTNONE + 1)
+#define PTE_FILE_SHIFT4		(_PAGE_BIT_SOFT_DIRTY + 1)
+#define PTE_FILE_BITS1		(PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
+#define PTE_FILE_BITS2		(PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
+#define PTE_FILE_BITS3		(PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1)
+
+#define pte_to_pgoff(pte)					\
+	((((pte).pte_low >> (PTE_FILE_SHIFT1))			\
+	  & ((1U << PTE_FILE_BITS1) - 1)))			\
+	+ ((((pte).pte_low >> (PTE_FILE_SHIFT2))		\
+	    & ((1U << PTE_FILE_BITS2) - 1))			\
+	   << (PTE_FILE_BITS1))					\
+	+ ((((pte).pte_low >> (PTE_FILE_SHIFT3))		\
+	    & ((1U << PTE_FILE_BITS3) - 1))			\
+	   << (PTE_FILE_BITS1 + PTE_FILE_BITS2))		\
+	+ ((((pte).pte_low >> (PTE_FILE_SHIFT4)))		\
+	   << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))
+
+#define pgoff_to_pte(off)					\
+	((pte_t) { .pte_low =					\
+	 ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1)	\
+	 + ((((off) >> PTE_FILE_BITS1)				\
+	     & ((1U << PTE_FILE_BITS2) - 1))			\
+	    << PTE_FILE_SHIFT2)					\
+	 + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2))	\
+	     & ((1U << PTE_FILE_BITS3) - 1))			\
+	    << PTE_FILE_SHIFT3)					\
+	 + ((((off) >>						\
+	      (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)))	\
+	    << PTE_FILE_SHIFT4)					\
+	 + _PAGE_FILE })
+
+#else /* CONFIG_MEM_SOFT_DIRTY */
+
 /*
  * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
- * split up the 29 bits of offset into this range:
+ * split up the 29 bits of offset into this range.
  */
 #define PTE_FILE_MAX_BITS	29
 #define PTE_FILE_SHIFT1		(_PAGE_BIT_PRESENT + 1)
@@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 		<< PTE_FILE_SHIFT3)					\
 	 + _PAGE_FILE })

+#endif /* CONFIG_MEM_SOFT_DIRTY */
+
 /* Encode and de-code a swap entry */
 #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
 #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
......
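
To see that this 2-level (non-PAE) encoding is lossless, here is a small
stand-alone check. It is an illustrative sketch, not kernel code: it
hard-codes the conventional x86-32 bit positions (_PAGE_BIT_PRESENT = 0,
_PAGE_BIT_FILE = 6, the dirty bit; _PAGE_BIT_PROTNONE = 8, the global
bit; _PAGE_BIT_SOFT_DIRTY = 11, the hidden bit) and mirrors the two
macros above, verifying that a 28-bit offset round-trips while bits
0, 6, 8 and 11 stay clear for their flags:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SHIFT1	1	/* _PAGE_BIT_PRESENT + 1 */
#define SHIFT2	7	/* _PAGE_BIT_FILE + 1 */
#define SHIFT3	9	/* _PAGE_BIT_PROTNONE + 1 */
#define SHIFT4	12	/* _PAGE_BIT_SOFT_DIRTY + 1 */
#define BITS1	(SHIFT2 - SHIFT1 - 1)	/* 5 offset bits at pte bits 1..5 */
#define BITS2	(SHIFT3 - SHIFT2 - 1)	/* 1 offset bit  at pte bit  7 */
#define BITS3	(SHIFT4 - SHIFT3 - 1)	/* 2 offset bits at pte bits 9..10 */

static uint32_t pack(uint32_t off)	/* mirrors pgoff_to_pte() */
{
	return ((off & ((1U << BITS1) - 1)) << SHIFT1)
	     + (((off >> BITS1) & ((1U << BITS2) - 1)) << SHIFT2)
	     + (((off >> (BITS1 + BITS2)) & ((1U << BITS3) - 1)) << SHIFT3)
	     + ((off >> (BITS1 + BITS2 + BITS3)) << SHIFT4);
}

static uint32_t unpack(uint32_t pte)	/* mirrors pte_to_pgoff() */
{
	return ((pte >> SHIFT1) & ((1U << BITS1) - 1))
	     + (((pte >> SHIFT2) & ((1U << BITS2) - 1)) << BITS1)
	     + (((pte >> SHIFT3) & ((1U << BITS3) - 1)) << (BITS1 + BITS2))
	     + ((pte >> SHIFT4) << (BITS1 + BITS2 + BITS3));
}

int main(void)
{
	uint32_t off;

	for (off = 0; off < (1U << 28); off += 12345)
		assert(unpack(pack(off)) == off);
	/* the reserved bits stay clear, so the flags can be OR-ed in */
	assert((pack((1U << 28) - 1)
		& ((1U << 0) | (1U << 6) | (1U << 8) | (1U << 11))) == 0);
	printf("28-bit offsets round-trip; bits 0, 6, 8, 11 stay free\n");
	return 0;
}
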
@@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
+ *
+ * For soft-dirty tracking, bit 11 is taken from
+ * the low part of the pte as well.
  */
 #define pte_to_pgoff(pte) ((pte).pte_high)
 #define pgoff_to_pte(off)						\
......
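
Under PAE the file offset occupies the whole of pte_high, so taking
bit 11 of pte_low for soft-dirty costs no offset bits and
PTE_FILE_MAX_BITS stays at 32.
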
@@ -329,6 +329,21 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
 }

+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+}
+
 /*
  * Mask out unsupported bits in a present pgprot. Non-present pgprots
  * can use those bits for other purposes, so leave them be.
......
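
Note that, like the other pte_* accessors, these helpers take and
return a pte by value; a caller has to store the result (or test it, in
the case of pte_file_soft_dirty()) for the change to take effect.
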
@@ -61,8 +61,10 @@
  * they do not conflict with each other.
  */

+#define _PAGE_BIT_SOFT_DIRTY	_PAGE_BIT_HIDDEN
+
 #ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
 #else
 #define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 0))
 #endif
......
@@ -736,6 +736,8 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
 		ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
 	} else if (is_swap_pte(ptent)) {
 		ptent = pte_swp_clear_soft_dirty(ptent);
+	} else if (pte_file(ptent)) {
+		ptent = pte_file_clear_soft_dirty(ptent);
 	}

 	set_pte_at(vma->vm_mm, addr, pte, ptent);
......
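
With this hunk a clear_refs write resets soft-dirty state for all three
flavours a pte can be in: present ptes, swap ptes and non-present file
(nonlinear) ptes.
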
@@ -432,6 +432,21 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 {
 	return pte;
 }

+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+	return 0;
+}
+
 #endif

 #ifndef __HAVE_PFNMAP_TRACKING
......
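
These asm-generic fallbacks compile to no-ops, so the common mm code
below can call the pte_file_* soft-dirty helpers unconditionally;
architectures with soft-dirty support override them, as x86 does above.
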
@@ -57,17 +57,22 @@ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long addr, unsigned long pgoff, pgprot_t prot)
 {
 	int err = -ENOMEM;
-	pte_t *pte;
+	pte_t *pte, ptfile;
 	spinlock_t *ptl;

 	pte = get_locked_pte(mm, addr, &ptl);
 	if (!pte)
 		goto out;

-	if (!pte_none(*pte))
+	ptfile = pgoff_to_pte(pgoff);
+
+	if (!pte_none(*pte)) {
+		if (pte_present(*pte) && pte_soft_dirty(*pte))
+			pte_file_mksoft_dirty(ptfile);
 		zap_pte(mm, vma, addr, pte);
+	}

-	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
+	set_pte_at(mm, addr, pte, ptfile);

 	/*
 	 * We don't need to run update_mmu_cache() here because the "file pte"
 	 * being installed by install_file_pte() is not a real pte - it's a
......
@@ -1141,9 +1141,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 				continue;
 			if (unlikely(details) && details->nonlinear_vma
 			    && linear_page_index(details->nonlinear_vma,
-						addr) != page->index)
-				set_pte_at(mm, addr, pte,
-					   pgoff_to_pte(page->index));
+						addr) != page->index) {
+				pte_t ptfile = pgoff_to_pte(page->index);
+				if (pte_soft_dirty(ptent))
+					pte_file_mksoft_dirty(ptfile);
+				set_pte_at(mm, addr, pte, ptfile);
+			}
 			if (PageAnon(page))
 				rss[MM_ANONPAGES]--;
 			else {
@@ -3410,6 +3413,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		entry = mk_pte(page, vma->vm_page_prot);
 		if (flags & FAULT_FLAG_WRITE)
 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
+			pte_mksoft_dirty(entry);
 		if (anon) {
 			inc_mm_counter_fast(mm, MM_ANONPAGES);
 			page_add_new_anon_rmap(page, vma, address);
......
@@ -1405,8 +1405,12 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 		pteval = ptep_clear_flush(vma, address, pte);

 		/* If nonlinear, store the file page offset in the pte. */
-		if (page->index != linear_page_index(vma, address))
-			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
+		if (page->index != linear_page_index(vma, address)) {
+			pte_t ptfile = pgoff_to_pte(page->index);
+			if (pte_soft_dirty(pteval))
+				pte_file_mksoft_dirty(ptfile);
+			set_pte_at(mm, address, pte, ptfile);
+		}

 		/* Move the dirty bit to the physical page now the pte is gone. */
 		if (pte_dirty(pteval))
......
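
One subtlety worth flagging: pte_file_mksoft_dirty() and
pte_mksoft_dirty() return the updated pte by value (see the x86 helpers
above), yet the new call sites in install_file_pte(), zap_pte_range(),
try_to_unmap_cluster() and __do_fault() discard that return value, so as
written the flag is computed but never stored. For the bit to actually
land in the installed pte, each call presumably needs to assign the
result back; an editorial sketch (not part of this commit) of the
corrected install_file_pte() fragment:

	ptfile = pgoff_to_pte(pgoff);

	if (!pte_none(*pte)) {
		/* carry soft-dirty over from the present pte */
		if (pte_present(*pte) && pte_soft_dirty(*pte))
			ptfile = pte_file_mksoft_dirty(ptfile);	/* keep the result */
		zap_pte(mm, vma, addr, pte);
	}

	set_pte_at(mm, addr, pte, ptfile);
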