Commit bab247ff authored by Heiko Carstens, committed by Martin Schwidefsky

s390/vmem: simplify vmem code for read-only mappings

For the kernel identity mapping, map everything read-writable and
subsequently call set_memory_ro() to make the ro section read-only.
This simplifies the code a lot.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Parent e8a97e42
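To summarize the net effect (a sketch reconstructed from the hunks below, with explanatory comments added; not the verbatim post-patch function), vmem_map_init() now maps every memblock region writable and then write-protects the kernel's read-only section with a single follow-up call:

        struct memblock_region *reg;
        unsigned long ro_start, ro_end;

        /* Map all physical memory 1:1 and read-write. */
        for_each_memblock(memory, reg)
                vmem_add_mem(reg->base, reg->size);

        /* Afterwards write-protect the ro section; set_memory_ro() takes a page count. */
        ro_start = PFN_ALIGN((unsigned long)&_stext);
        ro_end = (unsigned long)&_eshared & PAGE_MASK;
        set_memory_ro(ro_start, (ro_end - ro_start) >> PAGE_SHIFT);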
@@ -11,6 +11,7 @@
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
 #include <linux/memblock.h>
+#include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/setup.h>
@@ -77,7 +78,7 @@ pte_t __ref *vmem_pte_alloc(void)
 /*
  * Add a physical memory range to the 1:1 mapping.
  */
-static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
+static int vmem_add_mem(unsigned long start, unsigned long size)
 {
        unsigned long end = start + size;
        unsigned long address = start;
@@ -99,8 +100,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
                    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
                    !debug_pagealloc_enabled()) {
-                       pud_val(*pu_dir) = address |
-                               pgprot_val(ro ? REGION3_KERNEL_RO : REGION3_KERNEL);
+                       pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL);
                        address += PUD_SIZE;
                        continue;
                }
@@ -114,8 +114,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
                    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
                    !debug_pagealloc_enabled()) {
-                       pmd_val(*pm_dir) = address |
-                               pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
+                       pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL);
                        address += PMD_SIZE;
                        continue;
                }
@@ -127,8 +126,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                }
                pt_dir = pte_offset_kernel(pm_dir, address);
-               pte_val(*pt_dir) = address |
-                       pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
+               pte_val(*pt_dir) = address | pgprot_val(PAGE_KERNEL);
                address += PAGE_SIZE;
        }
        ret = 0;
@@ -338,7 +336,7 @@ int vmem_add_mapping(unsigned long start, unsigned long size)
        if (ret)
                goto out_free;
-       ret = vmem_add_mem(start, size, 0);
+       ret = vmem_add_mem(start, size);
        if (ret)
                goto out_remove;
        goto out;
@@ -361,29 +359,12 @@ void __init vmem_map_init(void)
 {
        unsigned long ro_start, ro_end;
        struct memblock_region *reg;
-       phys_addr_t start, end;
 
+       for_each_memblock(memory, reg)
+               vmem_add_mem(reg->base, reg->size);
        ro_start = PFN_ALIGN((unsigned long)&_stext);
        ro_end = (unsigned long)&_eshared & PAGE_MASK;
-       for_each_memblock(memory, reg) {
-               start = reg->base;
-               end = reg->base + reg->size;
-               if (start >= ro_end || end <= ro_start)
-                       vmem_add_mem(start, end - start, 0);
-               else if (start >= ro_start && end <= ro_end)
-                       vmem_add_mem(start, end - start, 1);
-               else if (start >= ro_start) {
-                       vmem_add_mem(start, ro_end - start, 1);
-                       vmem_add_mem(ro_end, end - ro_end, 0);
-               } else if (end < ro_end) {
-                       vmem_add_mem(start, ro_start - start, 0);
-                       vmem_add_mem(ro_start, end - ro_start, 1);
-               } else {
-                       vmem_add_mem(start, ro_start - start, 0);
-                       vmem_add_mem(ro_start, ro_end - ro_start, 1);
-                       vmem_add_mem(ro_end, end - ro_end, 0);
-               }
-       }
+       set_memory_ro(ro_start, (ro_end - ro_start) >> PAGE_SHIFT);
 }
 
 /*
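For reference, set_memory_ro() takes a number of pages rather than a byte length, which is why the patch shifts the section size right by PAGE_SHIFT. Below is a minimal user-space sketch of that arithmetic only, using simplified stand-ins for the kernel's PAGE_MASK/PFN_ALIGN macros and hypothetical section addresses (the real code uses &_stext and &_eshared):

        #include <stdio.h>

        #define PAGE_SHIFT      12      /* assumption: 4 KiB pages, as on s390 */
        #define PAGE_SIZE       (1UL << PAGE_SHIFT)
        #define PAGE_MASK       (~(PAGE_SIZE - 1))
        #define PFN_ALIGN(x)    (((unsigned long)(x) + PAGE_SIZE - 1) & PAGE_MASK)

        int main(void)
        {
                /* Hypothetical addresses standing in for &_stext and &_eshared. */
                unsigned long stext = 0x100000, eshared = 0x7403ff;
                unsigned long ro_start = PFN_ALIGN(stext);
                unsigned long ro_end = eshared & PAGE_MASK;

                /* 0x740000 - 0x100000 = 0x640000 bytes -> 1600 pages to protect. */
                printf("pages: %lu\n", (ro_end - ro_start) >> PAGE_SHIFT);
                return 0;
        }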