提交 54c29c63 编写于 作者: S Stanislaw Gruszka 提交者: Ingo Molnar

mm, x86: Remove debug_pagealloc_enabled

When (no)bootmem finishes its operation, it passes pages to the
buddy allocator. Since debug_pagealloc_enabled is not set at that
point, we do not protect those pages, which is not what we want
with CONFIG_DEBUG_PAGEALLOC=y.

To fix this, remove debug_pagealloc_enabled. That variable was
introduced by commit 12d6f21e "x86: do not PSE on
CONFIG_DEBUG_PAGEALLOC=y" to get more CPA (change page
attribute) code testing. But currently we have CONFIG_CPA_DEBUG,
which tests CPA.
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1322582711-14571-1-git-send-email-sgruszka@redhat.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
上级 855c743a
...@@ -1333,12 +1333,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable) ...@@ -1333,12 +1333,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
numpages * PAGE_SIZE); numpages * PAGE_SIZE);
} }
/*
* If page allocator is not up yet then do not call c_p_a():
*/
if (!debug_pagealloc_enabled)
return;
/* /*
* The return value is ignored as the calls cannot fail. * The return value is ignored as the calls cannot fail.
* Large pages for identity mappings are not used at boot time * Large pages for identity mappings are not used at boot time
......
...@@ -1537,23 +1537,13 @@ static inline void vm_stat_account(struct mm_struct *mm, ...@@ -1537,23 +1537,13 @@ static inline void vm_stat_account(struct mm_struct *mm,
#endif /* CONFIG_PROC_FS */ #endif /* CONFIG_PROC_FS */
#ifdef CONFIG_DEBUG_PAGEALLOC #ifdef CONFIG_DEBUG_PAGEALLOC
extern int debug_pagealloc_enabled;
extern void kernel_map_pages(struct page *page, int numpages, int enable); extern void kernel_map_pages(struct page *page, int numpages, int enable);
static inline void enable_debug_pagealloc(void)
{
debug_pagealloc_enabled = 1;
}
#ifdef CONFIG_HIBERNATION #ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page); extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */ #endif /* CONFIG_HIBERNATION */
#else #else
static inline void static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {} kernel_map_pages(struct page *page, int numpages, int enable) {}
static inline void enable_debug_pagealloc(void)
{
}
#ifdef CONFIG_HIBERNATION #ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; } static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */ #endif /* CONFIG_HIBERNATION */
......
...@@ -282,10 +282,6 @@ static int __init unknown_bootoption(char *param, char *val) ...@@ -282,10 +282,6 @@ static int __init unknown_bootoption(char *param, char *val)
return 0; return 0;
} }
#ifdef CONFIG_DEBUG_PAGEALLOC
int __read_mostly debug_pagealloc_enabled = 0;
#endif
static int __init init_setup(char *str) static int __init init_setup(char *str)
{ {
unsigned int i; unsigned int i;
...@@ -597,7 +593,6 @@ asmlinkage void __init start_kernel(void) ...@@ -597,7 +593,6 @@ asmlinkage void __init start_kernel(void)
} }
#endif #endif
page_cgroup_init(); page_cgroup_init();
enable_debug_pagealloc();
debug_objects_mem_init(); debug_objects_mem_init();
kmemleak_init(); kmemleak_init();
setup_per_cpu_pageset(); setup_per_cpu_pageset();
......
...@@ -95,9 +95,6 @@ static void unpoison_pages(struct page *page, int n) ...@@ -95,9 +95,6 @@ static void unpoison_pages(struct page *page, int n)
void kernel_map_pages(struct page *page, int numpages, int enable) void kernel_map_pages(struct page *page, int numpages, int enable)
{ {
if (!debug_pagealloc_enabled)
return;
if (enable) if (enable)
unpoison_pages(page, numpages); unpoison_pages(page, numpages);
else else
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册