Commit f62d0f00, authored by Ingo Molnar

x86: cpa: set_memory_notpresent()

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Parent: d806e5ee
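In short: the patch introduces a new change-page-attribute helper, set_memory_np(), which clears _PAGE_PRESENT on a range of kernel-virtual pages, declares it in the cacheflush header, and calls it (together with set_memory_nx()) from free_init_pages() when CONFIG_DEBUG_RODATA is set, so that freed __init memory faults on any later use. The CONFIG_DEBUG_PAGEALLOC path in kernel_map_pages() is likewise switched to explicit present/not-present helpers. As a minimal usage sketch only (the wrapper name below is invented for illustration and merely restates the pattern visible in the diff):

#include <asm/cacheflush.h>	/* declares set_memory_np()/set_memory_nx() as of this patch */

/* Hypothetical helper, illustration only: hide a page-aligned kernel-virtual
 * range so that any later read, write or instruction fetch faults at once. */
static void hide_kernel_range(unsigned long begin, unsigned long end)
{
	set_memory_np(begin, (end - begin) / PAGE_SIZE);	/* clear _PAGE_PRESENT */
	set_memory_nx(begin, (end - begin) / PAGE_SIZE);	/* mark not executable (set NX) */
}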
@@ -559,8 +559,21 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 		free_page(addr);
 		totalram_pages++;
 	}
-	if (addr > __START_KERNEL_map)
-		global_flush_tlb();
+#ifdef CONFIG_DEBUG_RODATA
+	/*
+	 * This will make the __init pages not present and
+	 * not executable, so that any attempt to use a
+	 * __init function from now on will fault immediately
+	 * rather than spuriously later when memory gets reused.
+	 *
+	 * We only do this for DEBUG_RODATA to not break up the
+	 * 2Mb kernel mapping just for this debug feature.
+	 */
+	if (begin >= __START_KERNEL_map) {
+		set_memory_np(begin, (end - begin)/PAGE_SIZE);
+		set_memory_nx(begin, (end - begin)/PAGE_SIZE);
+	}
+#endif
 }
 
 void free_initmem(void)
...
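The comment in the hunk above carries the rationale: once the __init sections are freed, a leftover call into an __init function should fail loudly rather than execute reused memory. A hedged illustration of that failure mode (both function names below are invented for the example):

#include <linux/init.h>

static int __init one_time_setup(void)	/* lives in .init.text, freed after boot */
{
	return 0;
}

void stale_caller(void)
{
	/*
	 * With CONFIG_DEBUG_RODATA, the .init.text pages are now not-present
	 * and not-executable once freed, so this call oopses with a page
	 * fault immediately instead of jumping into reused memory.
	 */
	one_time_setup();
}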
@@ -357,8 +357,6 @@ int change_page_attr_clear(unsigned long addr, int numpages, pgprot_t prot)
 	return change_page_attr_addr(addr, numpages, prot);
 }
 
 int set_memory_uc(unsigned long addr, int numpages)
 {
 	pgprot_t uncached;
@@ -402,7 +400,6 @@ int set_memory_ro(unsigned long addr, int numpages)
 	pgprot_val(rw) = _PAGE_RW;
 	return change_page_attr_clear(addr, numpages, rw);
 }
-EXPORT_SYMBOL(set_memory_ro);
 
 int set_memory_rw(unsigned long addr, int numpages)
 {
@@ -411,7 +408,14 @@ int set_memory_rw(unsigned long addr, int numpages)
 	pgprot_val(rw) = _PAGE_RW;
 	return change_page_attr_set(addr, numpages, rw);
 }
-EXPORT_SYMBOL(set_memory_rw);
+
+int set_memory_np(unsigned long addr, int numpages)
+{
+	pgprot_t present;
+
+	pgprot_val(present) = _PAGE_PRESENT;
+	return change_page_attr_clear(addr, numpages, present);
+}
 
 int set_pages_uc(struct page *page, int numpages)
 {
@@ -461,7 +465,6 @@ int set_pages_ro(struct page *page, int numpages)
 	pgprot_val(rw) = _PAGE_RW;
 	return change_page_attr_clear(addr, numpages, rw);
 }
-EXPORT_SYMBOL(set_pages_ro);
 
 int set_pages_rw(struct page *page, int numpages)
 {
@@ -471,8 +474,6 @@ int set_pages_rw(struct page *page, int numpages)
 	pgprot_val(rw) = _PAGE_RW;
 	return change_page_attr_set(addr, numpages, rw);
 }
-EXPORT_SYMBOL(set_pages_rw);
-
 
 void clflush_cache_range(void *addr, int size)
 {
@@ -503,6 +504,20 @@ void global_flush_tlb(void)
 EXPORT_SYMBOL(global_flush_tlb);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
+
+static int __set_pages_p(struct page *page, int numpages)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+	return change_page_attr_set(addr, numpages,
+				    __pgprot(_PAGE_PRESENT | _PAGE_RW));
+}
+
+static int __set_pages_np(struct page *page, int numpages)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
+}
+
 void kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (PageHighMem(page))
@@ -522,7 +537,10 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	 * The return value is ignored - the calls cannot fail,
 	 * large pages are disabled at boot time:
 	 */
-	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+	if (enable)
+		__set_pages_p(page, numpages);
+	else
+		__set_pages_np(page, numpages);
 
 	/*
 	 * We should perform an IPI and flush all tlbs,
...
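For context on the hunks above: with CONFIG_DEBUG_PAGEALLOC, kernel_map_pages() is what the page allocator uses to unmap pages from the kernel direct mapping when they are freed and to map them back on allocation; it now toggles _PAGE_PRESENT explicitly through the new static helpers instead of going through change_page_attr() with PAGE_KERNEL/__pgprot(0). A rough caller sketch (the two wrapper names are hypothetical; only the kernel_map_pages() calls reflect how the allocator drives this interface):

#include <linux/mm.h>	/* kernel_map_pages(), struct page */

static void debug_unmap_on_free(struct page *page, int order)
{
	/* enable == 0: __set_pages_np() clears _PAGE_PRESENT, so any
	 * use-after-free through the direct mapping faults immediately */
	kernel_map_pages(page, 1 << order, 0);
}

static void debug_map_on_alloc(struct page *page, int order)
{
	/* enable != 0: __set_pages_p() restores _PAGE_PRESENT | _PAGE_RW */
	kernel_map_pages(page, 1 << order, 1);
}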
@@ -42,6 +42,7 @@ int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_np(unsigned long addr, int numpages);
 
 void clflush_cache_range(void *addr, int size);
...