提交 83a92529 编写于 作者: M Michal Simek

microblaze: mm: Fix lowmem max memory size limits

Use CONFIG_LOWMEM_SIZE if the system has a larger RAM size.
For systems with a larger RAM size, enable HIGHMEM support.

Also setup limitation for memblock and use memblock
allocation in lowmem region.
Signed-off-by: Michal Simek <monstr@monstr.eu>
上级 4e2e4124
...@@ -135,7 +135,6 @@ extern unsigned long min_low_pfn; ...@@ -135,7 +135,6 @@ extern unsigned long min_low_pfn;
extern unsigned long max_pfn; extern unsigned long max_pfn;
extern unsigned long memory_start; extern unsigned long memory_start;
extern unsigned long memory_end;
extern unsigned long memory_size; extern unsigned long memory_size;
extern int page_is_ram(unsigned long pfn); extern int page_is_ram(unsigned long pfn);
......
...@@ -94,8 +94,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } ...@@ -94,8 +94,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/* Start and end of the vmalloc area. */ /* Start and end of the vmalloc area. */
/* Make sure to map the vmalloc area above the pinned kernel memory area /* Make sure to map the vmalloc area above the pinned kernel memory area
of 32Mb. */ of 32Mb. */
#define VMALLOC_START (CONFIG_KERNEL_START + \ #define VMALLOC_START (CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
max(32 * 1024 * 1024UL, memory_size))
#define VMALLOC_END ioremap_bot #define VMALLOC_END ioremap_bot
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
...@@ -80,7 +80,7 @@ extern unsigned long search_exception_table(unsigned long); ...@@ -80,7 +80,7 @@ extern unsigned long search_exception_table(unsigned long);
static inline int ___range_ok(unsigned long addr, unsigned long size) static inline int ___range_ok(unsigned long addr, unsigned long size)
{ {
return ((addr < memory_start) || return ((addr < memory_start) ||
((addr + size) > memory_end)); ((addr + size - 1) > (memory_start + memory_size - 1)));
} }
#define __range_ok(addr, size) \ #define __range_ok(addr, size) \
......
...@@ -44,9 +44,9 @@ char *klimit = _end; ...@@ -44,9 +44,9 @@ char *klimit = _end;
*/ */
unsigned long memory_start; unsigned long memory_start;
EXPORT_SYMBOL(memory_start); EXPORT_SYMBOL(memory_start);
unsigned long memory_end; /* due to mm/nommu.c */
unsigned long memory_size; unsigned long memory_size;
EXPORT_SYMBOL(memory_size); EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;
/* /*
* paging_init() sets up the page tables - in fact we've already done this. * paging_init() sets up the page tables - in fact we've already done this.
...@@ -58,7 +58,7 @@ static void __init paging_init(void) ...@@ -58,7 +58,7 @@ static void __init paging_init(void)
/* Clean every zones */ /* Clean every zones */
memset(zones_size, 0, sizeof(zones_size)); memset(zones_size, 0, sizeof(zones_size));
zones_size[ZONE_DMA] = max_mapnr; zones_size[ZONE_DMA] = max_pfn;
free_area_init(zones_size); free_area_init(zones_size);
} }
...@@ -74,32 +74,31 @@ void __init setup_memory(void) ...@@ -74,32 +74,31 @@ void __init setup_memory(void)
/* Find main memory where is the kernel */ /* Find main memory where is the kernel */
for_each_memblock(memory, reg) { for_each_memblock(memory, reg) {
memory_start = (u32)reg->base; memory_start = (u32)reg->base;
memory_end = (u32) reg->base + reg->size; lowmem_size = reg->size;
if ((memory_start <= (u32)_text) && if ((memory_start <= (u32)_text) &&
((u32)_text <= memory_end)) { ((u32)_text <= (memory_start + lowmem_size - 1))) {
memory_size = memory_end - memory_start; memory_size = lowmem_size;
PAGE_OFFSET = memory_start; PAGE_OFFSET = memory_start;
printk(KERN_INFO "%s: Main mem: 0x%x-0x%x, " printk(KERN_INFO "%s: Main mem: 0x%x, "
"size 0x%08x\n", __func__, (u32) memory_start, "size 0x%08x\n", __func__, (u32) memory_start,
(u32) memory_end, (u32) memory_size); (u32) memory_size);
break; break;
} }
} }
if (!memory_start || !memory_end) { if (!memory_start || !memory_size) {
panic("%s: Missing memory setting 0x%08x-0x%08x\n", panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
__func__, (u32) memory_start, (u32) memory_end); __func__, (u32) memory_start, (u32) memory_size);
} }
/* reservation of region where is the kernel */ /* reservation of region where is the kernel */
kernel_align_start = PAGE_DOWN((u32)_text); kernel_align_start = PAGE_DOWN((u32)_text);
/* ALIGN can be remove because _end in vmlinux.lds.S is align */ /* ALIGN can be remove because _end in vmlinux.lds.S is align */
kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start; kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
memblock_reserve(kernel_align_start, kernel_align_size); printk(KERN_INFO "%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n",
__func__, kernel_align_start, kernel_align_start __func__, kernel_align_start, kernel_align_start
+ kernel_align_size, kernel_align_size); + kernel_align_size, kernel_align_size);
memblock_reserve(kernel_align_start, kernel_align_size);
#endif #endif
/* /*
* Kernel: * Kernel:
...@@ -116,11 +115,13 @@ void __init setup_memory(void) ...@@ -116,11 +115,13 @@ void __init setup_memory(void)
min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
/* RAM is assumed contiguous */ /* RAM is assumed contiguous */
num_physpages = max_mapnr = memory_size >> PAGE_SHIFT; num_physpages = max_mapnr = memory_size >> PAGE_SHIFT;
max_pfn = max_low_pfn = memory_end >> PAGE_SHIFT; max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;
printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr); printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr);
printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn); printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
printk(KERN_INFO "%s: max_pfn: %#lx\n", __func__, max_pfn);
/* /*
* Find an area to use for the bootmem bitmap. * Find an area to use for the bootmem bitmap.
...@@ -134,14 +135,25 @@ void __init setup_memory(void) ...@@ -134,14 +135,25 @@ void __init setup_memory(void)
memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size); memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
/* free bootmem is whole main memory */ /* free bootmem is whole main memory */
free_bootmem(memory_start, memory_size); free_bootmem(memory_start, lowmem_size);
/* reserve allocate blocks */ /* reserve allocate blocks */
for_each_memblock(reserved, reg) { for_each_memblock(reserved, reg) {
pr_debug("reserved - 0x%08x-0x%08x\n", unsigned long top = reg->base + reg->size - 1;
(u32) reg->base, (u32) reg->size);
pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
(u32) reg->base, (u32) reg->size, top,
memory_start + lowmem_size - 1);
if (top <= (memory_start + lowmem_size - 1)) {
reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
} else if (reg->base < (memory_start + lowmem_size - 1)) {
unsigned long trunc_size = memory_start + lowmem_size -
reg->base;
reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
}
} }
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
init_bootmem_done = 1; init_bootmem_done = 1;
#endif #endif
...@@ -186,7 +198,8 @@ void free_initmem(void) ...@@ -186,7 +198,8 @@ void free_initmem(void)
void __init mem_init(void) void __init mem_init(void)
{ {
high_memory = (void *)__va(memory_end); high_memory = (void *)__va(memory_start + lowmem_size - 1);
/* this will put all memory onto the freelists */ /* this will put all memory onto the freelists */
totalram_pages += free_all_bootmem(); totalram_pages += free_all_bootmem();
...@@ -222,7 +235,6 @@ static void mm_cmdline_setup(void) ...@@ -222,7 +235,6 @@ static void mm_cmdline_setup(void)
maxmem = memparse(p, &p); maxmem = memparse(p, &p);
if (maxmem && memory_size > maxmem) { if (maxmem && memory_size > maxmem) {
memory_size = maxmem; memory_size = maxmem;
memory_end = memory_start + memory_size;
memblock.memory.regions[0].size = memory_size; memblock.memory.regions[0].size = memory_size;
} }
} }
...@@ -272,9 +284,12 @@ asmlinkage void __init mmu_init(void) ...@@ -272,9 +284,12 @@ asmlinkage void __init mmu_init(void)
} }
/* Find main memory where the kernel is */ /* Find main memory where the kernel is */
memory_start = (u32) memblock.memory.regions[0].base; memory_start = (u32) memblock.memory.regions[0].base;
memory_end = (u32) memblock.memory.regions[0].base + lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;
(u32) memblock.memory.regions[0].size;
memory_size = memory_end - memory_start; if (lowmem_size > CONFIG_LOWMEM_SIZE) {
lowmem_size = CONFIG_LOWMEM_SIZE;
memory_size = lowmem_size;
}
mm_cmdline_setup(); /* FIXME parse args from command line - not used */ mm_cmdline_setup(); /* FIXME parse args from command line - not used */
...@@ -307,9 +322,13 @@ asmlinkage void __init mmu_init(void) ...@@ -307,9 +322,13 @@ asmlinkage void __init mmu_init(void)
ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */ ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
#endif /* CONFIG_HIGHMEM_START_BOOL */ #endif /* CONFIG_HIGHMEM_START_BOOL */
ioremap_bot = ioremap_base; ioremap_bot = ioremap_base;
/* Initialize the context management stuff */ /* Initialize the context management stuff */
mmu_context_init(); mmu_context_init();
/* Shortly after that, the entire linear mapping will be available */
/* This will also cause that unflatten device tree will be allocated
* inside 768MB limit */
memblock_set_current_limit(memory_start + lowmem_size - 1);
} }
/* This is only called until mem_init is done. */ /* This is only called until mem_init is done. */
......
...@@ -44,11 +44,6 @@ unsigned long ioremap_base; ...@@ -44,11 +44,6 @@ unsigned long ioremap_base;
unsigned long ioremap_bot; unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot); EXPORT_SYMBOL(ioremap_bot);
/* The maximum lowmem defaults to 768Mb, but this can be configured to
* another value.
*/
#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists; struct pgtable_cache_struct quicklists;
#endif #endif
...@@ -171,7 +166,7 @@ void __init mapin_ram(void) ...@@ -171,7 +166,7 @@ void __init mapin_ram(void)
v = CONFIG_KERNEL_START; v = CONFIG_KERNEL_START;
p = memory_start; p = memory_start;
for (s = 0; s < memory_size; s += PAGE_SIZE) { for (s = 0; s < CONFIG_LOWMEM_SIZE; s += PAGE_SIZE) {
f = _PAGE_PRESENT | _PAGE_ACCESSED | f = _PAGE_PRESENT | _PAGE_ACCESSED |
_PAGE_SHARED | _PAGE_HWEXEC; _PAGE_SHARED | _PAGE_HWEXEC;
if ((char *) v < _stext || (char *) v >= _etext) if ((char *) v < _stext || (char *) v >= _etext)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册