提交 9a840895 编写于 作者: H Hugh Dickins 提交者: Linus Torvalds

ksm: identify PageKsm pages

KSM will need to identify its kernel merged pages unambiguously, and
/proc/kpageflags will probably like to do so too.

Since KSM will only be substituting anonymous pages, statistics are best
preserved by making a PageKsm page a special PageAnon page: one with no
anon_vma.

But KSM then needs its own page_add_ksm_rmap() - keep it in ksm.h near
PageKsm; and do_wp_page() must COW them, unlike singly mapped PageAnons.
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Chris Wright <chrisw@redhat.com>
Signed-off-by: Izik Eidus <ieidus@redhat.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Avi Kivity <avi@redhat.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 21333b2b
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/mmzone.h> #include <linux/mmzone.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
...@@ -95,6 +96,8 @@ static const struct file_operations proc_kpagecount_operations = { ...@@ -95,6 +96,8 @@ static const struct file_operations proc_kpagecount_operations = {
#define KPF_UNEVICTABLE 18 #define KPF_UNEVICTABLE 18
#define KPF_NOPAGE 20 #define KPF_NOPAGE 20
#define KPF_KSM 21
/* kernel hacking assistances /* kernel hacking assistances
* WARNING: subject to change, never rely on them! * WARNING: subject to change, never rely on them!
*/ */
...@@ -137,6 +140,8 @@ static u64 get_uflags(struct page *page) ...@@ -137,6 +140,8 @@ static u64 get_uflags(struct page *page)
u |= 1 << KPF_MMAP; u |= 1 << KPF_MMAP;
if (PageAnon(page)) if (PageAnon(page))
u |= 1 << KPF_ANON; u |= 1 << KPF_ANON;
if (PageKsm(page))
u |= 1 << KPF_KSM;
/* /*
* compound pages: export both head/tail info * compound pages: export both head/tail info
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/vmstat.h>
#ifdef CONFIG_KSM #ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start, int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
...@@ -29,6 +30,27 @@ static inline void ksm_exit(struct mm_struct *mm) ...@@ -29,6 +30,27 @@ static inline void ksm_exit(struct mm_struct *mm)
if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
__ksm_exit(mm); __ksm_exit(mm);
} }
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma.
 */
static inline int PageKsm(struct page *page)
{
	/*
	 * KSM leaves anon_vma NULL, so the mapping field of a merged page
	 * holds exactly the PAGE_MAPPING_ANON flag bit and nothing else.
	 */
	unsigned long mapping = (unsigned long)page->mapping;

	return mapping == PAGE_MAPPING_ANON;
}
/*
 * But we have to avoid the checking which page_add_anon_rmap() performs.
 */
static inline void page_add_ksm_rmap(struct page *page)
{
	/* Only the first mapper initializes the anon-style mapping and stats */
	if (!atomic_inc_and_test(&page->_mapcount))
		return;

	page->mapping = (void *) PAGE_MAPPING_ANON;
	__inc_zone_page_state(page, NR_ANON_PAGES);
}
#else /* !CONFIG_KSM */ #else /* !CONFIG_KSM */
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
...@@ -45,6 +67,13 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) ...@@ -45,6 +67,13 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
static inline void ksm_exit(struct mm_struct *mm) static inline void ksm_exit(struct mm_struct *mm)
{ {
} }
static inline int PageKsm(struct page *page)
{
	/* With CONFIG_KSM off, no page can ever be a KSM page */
	return 0;
}
/* No stub required for page_add_ksm_rmap(page) */
#endif /* !CONFIG_KSM */ #endif /* !CONFIG_KSM */
#endif #endif
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#include <linux/swap.h> #include <linux/swap.h>
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/ksm.h>
#include <linux/rmap.h> #include <linux/rmap.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/delayacct.h> #include <linux/delayacct.h>
...@@ -1974,7 +1975,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -1974,7 +1975,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
* Take out anonymous pages first, anonymous shared vmas are * Take out anonymous pages first, anonymous shared vmas are
* not dirty accountable. * not dirty accountable.
*/ */
if (PageAnon(old_page)) { if (PageAnon(old_page) && !PageKsm(old_page)) {
if (!trylock_page(old_page)) { if (!trylock_page(old_page)) {
page_cache_get(old_page); page_cache_get(old_page);
pte_unmap_unlock(page_table, ptl); pte_unmap_unlock(page_table, ptl);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册