Commit 1bcdd854 authored by Helge Deller, committed by Kyle McMartin

[PARISC] Add CONFIG_DEBUG_RODATA to protect read-only data

Add the parisc version of the "mark rodata section read only" patches.
Based on code from and Signed-off-by Arjan van de Ven
<arjan@infradead.org>, Ingo Molnar <mingo@elte.hu>, Andi Kleen <ak@muc.de>,
Andrew Morton <akpm@osdl.org>, Linus Torvalds <torvalds@osdl.org>.
Signed-off-by: Helge Deller <deller@parisc-linux.org>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
Parent a2bb214d
@@ -11,4 +11,14 @@ config DEBUG_RWLOCK
	  too many attempts. If you suspect a rwlock problem or a kernel
	  hacker asks for this option then say Y. Otherwise say N.

config DEBUG_RODATA
	bool "Write protect kernel read-only data structures"
	depends on DEBUG_KERNEL
	help
	  Mark the kernel read-only data as write-protected in the pagetables,
	  in order to catch accidental (and incorrect) writes to such const
	  data. This option may have a slight performance impact because a
	  portion of the kernel code won't be covered by a TLB anymore.
	  If in doubt, say "N".

endmenu
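
As an aside, not part of the patch: the protection this option enables can be demonstrated with a throwaway test module along the lines below. The module name, the parameter, and the idea of feeding it an address from System.map that lies inside [__start_rodata, __end_rodata) are illustrative assumptions; a module-local const would not be covered, since the option only protects the kernel image's own rodata section.

/*
 * Hypothetical test module (illustration only).  Load it with an address
 * inside the kernel image's rodata section, e.g. the address of
 * __start_rodata from System.map.  With CONFIG_DEBUG_RODATA enabled the
 * store below should oops instead of silently modifying const data.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>

static unsigned long rodata_addr;
module_param(rodata_addr, ulong, 0);
MODULE_PARM_DESC(rodata_addr, "kernel virtual address inside [__start_rodata, __end_rodata)");

static int __init rodata_poke_init(void)
{
	unsigned long *p = (unsigned long *) rodata_addr;

	if (!rodata_addr)
		return -EINVAL;

	printk(KERN_INFO "rodata_poke: writing to %p\n", p);
	*p = 0;		/* expected to fault on the write-protected mapping */
	return 0;
}

static void __exit rodata_poke_exit(void)
{
}

module_init(rodata_poke_init);
module_exit(rodata_poke_exit);
MODULE_LICENSE("GPL");
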
@@ -22,10 +22,9 @@
#include <linux/linkage.h>
#include <asm/unistd.h>
	.text
#define ENTRY_NAME(_name_) .word _name_
	.section .rodata,"a"
	.align 4
	.export hpux_call_table
	.import hpux_unimplemented_wrapper
......
@@ -650,6 +650,8 @@ end_linux_gateway_page:
#define LWS_ENTRY(_name_) .word (lws_##_name_ - linux_gateway_page)
#endif
	.section .rodata,"a"
	.align 4096
/* Light-weight-syscall table */
/* Start of lws table. */
......
@@ -417,6 +417,19 @@ void free_initmem(void)
#endif
}
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	extern char __start_rodata, __end_rodata;
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		(unsigned long)(&__end_rodata - &__start_rodata) >> 10);
}
#endif
/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
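
On parisc the hunk above only has to report the size: as the comment says, pagetable_init() and map_pages() already install KERNEL_RO mappings for the rodata range. For comparison, an architecture that maps the whole kernel writable has to rewrite page attributes at this point. The sketch below approximates the i386-style approach this patch is based on; it is reconstructed from memory rather than taken from this diff, and assumes that era's change_page_attr()/global_flush_tlb() interface.

/* Sketch only -- not parisc code: flip the rodata pages read-only at runtime. */
extern char __start_rodata, __end_rodata;

void mark_rodata_ro(void)
{
	unsigned long start = (unsigned long) &__start_rodata;
	unsigned long end = (unsigned long) &__end_rodata;
	unsigned long addr;

	/* Rewrite the kernel page table entry for every rodata page. */
	for (addr = start; addr < end; addr += PAGE_SIZE)
		change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);

	/* Changing the PTEs is not enough by itself: stale TLB entries could
	   still permit writes, so flush before relying on the protection. */
	global_flush_tlb();

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		(end - start) >> 10);
}
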
@@ -685,7 +698,7 @@ static void __init pagetable_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
-		printk("initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
+		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			initrd_end - initrd_start, PAGE_KERNEL);
	}
......
@@ -183,4 +183,10 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
	__flush_cache_page(vma, vmaddr);
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#endif /* _PARISC_CACHEFLUSH_H */
@@ -213,7 +213,7 @@ extern void *vmalloc_start;
#define PAGE_COPY PAGE_EXECREAD
#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
#define PAGE_FLUSH __pgprot(_PAGE_FLUSH)
......
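
The PAGE_KERNEL_RO change above replaces a hand-maintained list of protection bits with _PAGE_KERNEL masked by ~_PAGE_WRITE, so the read-only variant presumably stays in sync if _PAGE_KERNEL ever gains another bit. A small stand-alone C illustration of that masking, using made-up bit values rather than the real parisc definitions:

#include <stdio.h>

/* Made-up bit values for illustration only -- not the parisc definitions. */
#define _PAGE_PRESENT	0x001
#define _PAGE_READ	0x002
#define _PAGE_WRITE	0x004
#define _PAGE_EXEC	0x008
#define _PAGE_DIRTY	0x010
#define _PAGE_ACCESSED	0x020

#define _PAGE_KERNEL	(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | \
			 _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)

/* Old style: every bit of the read-only variant listed again by hand. */
#define PAGE_KERNEL_RO_OLD \
	(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)

/* New style: derive the read-only variant from _PAGE_KERNEL. */
#define PAGE_KERNEL_RO_NEW	(_PAGE_KERNEL & ~_PAGE_WRITE)

int main(void)
{
	/* Both spellings produce the same bits today... */
	printf("old = %#x, new = %#x\n", PAGE_KERNEL_RO_OLD, PAGE_KERNEL_RO_NEW);
	/* ...but only the new one automatically tracks _PAGE_KERNEL if the
	   kernel mapping ever grows another protection bit. */
	return 0;
}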