Commit b9af7c0d authored by Suresh Siddha, committed by H. Peter Anvin

x86-64: preserve large page mapping for 1st 2MB kernel txt with CONFIG_DEBUG_RODATA

In the first 2MB, kernel text is co-located with kernel static
page tables setup by head_64.S.  CONFIG_DEBUG_RODATA chops this
2MB large page mapping to small 4KB pages as we mark the kernel text as RO,
leaving the static page tables as RW.
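For illustration, a minimal user-space C sketch (not kernel code; the addresses and symbol names are assumed examples, not the kernel's) of why one 2MB mapping cannot hold RO text and RW page tables at the same time, forcing the split into 4KB pages:

#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE  (2ULL << 20)          /* one x86-64 large page: 2MB */
#define PMD_MASK  (~(PMD_SIZE - 1))

int main(void)
{
        /* Assumed example addresses in the kernel text mapping. */
        uint64_t kernel_text = 0xffffffff81000000ULL; /* start of kernel text */
        uint64_t static_pgt  = 0xffffffff81002000ULL; /* static page tables from head_64.S */

        /* Both addresses fall inside the same 2MB-aligned region... */
        if ((kernel_text & PMD_MASK) == (static_pgt & PMD_MASK))
                printf("text and static page tables share one 2MB mapping\n");

        /*
         * ...so marking the text RO while the page tables stay RW cannot be
         * expressed by a single 2MB page protection: the mapping has to be
         * split into 4KB pages.
         */
        return 0;
}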

With CONFIG_DEBUG_RODATA disabled, OLTP run on NHM-EP shows 1% improvement
with 2% reduction in system time and 1% improvement in iowait idle time.

To recover this, move the kernel static page tables to .data section, so that
we don't have to break the first 2MB of kernel text to small pages with
CONFIG_DEBUG_RODATA.
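As a rough C analogue of the head_64.S change below (the actual fix uses a .data directive in assembly; the symbol name here is hypothetical), placing a page-aligned object explicitly in the .data section keeps it out of the text area:

/* Hypothetical example: a page-sized table placed in .data instead of
 * being emitted next to the early kernel text, using GCC section and
 * alignment attributes. */
#define EXAMPLE_PAGE_SIZE 4096

unsigned char example_early_pgt[EXAMPLE_PAGE_SIZE]
        __attribute__((aligned(EXAMPLE_PAGE_SIZE), section(".data")));

Because the object then lives in .data, the RO protection applied to the kernel text no longer has to carve an RW hole out of the first 2MB mapping.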
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
LKML-Reference: <20091014220254.063193621@sbs-t61.sc.intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Parent adc19389
@@ -262,11 +262,11 @@ ENTRY(secondary_startup_64)
 	.quad	x86_64_start_kernel
 ENTRY(initial_gs)
 	.quad	INIT_PER_CPU_VAR(irq_stack_union)
-	__FINITDATA

 ENTRY(stack_start)
 	.quad	init_thread_union+THREAD_SIZE-8
 	.word	0
+	__FINITDATA

 bad_address:
 	jmp bad_address
@@ -340,6 +340,7 @@ ENTRY(name)
 	i = i + 1 ;				\
 	.endr

+	.data
 	/*
 	 * This default setting generates an ident mapping at address 0x100000
 	 * and a mapping for the kernel that precisely maps virtual address
@@ -699,7 +699,7 @@ static int kernel_set_to_readonly;

 void set_kernel_text_rw(void)
 {
-	unsigned long start = PFN_ALIGN(_stext);
+	unsigned long start = PFN_ALIGN(_text);
 	unsigned long end = PFN_ALIGN(__start_rodata);

 	if (!kernel_set_to_readonly)
@@ -713,7 +713,7 @@ void set_kernel_text_rw(void)

 void set_kernel_text_ro(void)
 {
-	unsigned long start = PFN_ALIGN(_stext);
+	unsigned long start = PFN_ALIGN(_text);
 	unsigned long end = PFN_ALIGN(__start_rodata);

 	if (!kernel_set_to_readonly)
@@ -727,7 +727,7 @@ void set_kernel_text_ro(void)

 void mark_rodata_ro(void)
 {
-	unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
+	unsigned long start = PFN_ALIGN(_text), end = PFN_ALIGN(__end_rodata);
 	unsigned long rodata_start =
 		((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;