Commit 2778f620 authored by Russell King

ARM: initial LMB trial

Acked-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Parent 4bb2e27d
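This patch moves ARM's early memory bookkeeping from ad-hoc bootmem reservations onto memblock (LMB): the meminfo banks are registered with memblock, the kernel image, initrd and page tables are reserved there, and only afterwards is the bootmem allocator seeded from the memblock data. For orientation, a minimal sketch of that bring-up sequence follows; example_memblock_bringup() is a hypothetical name, and the calls are the era's memblock API exactly as used in the diff (memblock_init/add/reserve/analyze/dump_all) — this is not a standalone program.

#include <linux/memblock.h>
#include <asm/memblock.h>       /* arm_memblock_init() declaration added by this patch */
#include <asm/setup.h>          /* struct meminfo / struct membank */
#include <asm/sections.h>       /* _stext, _end */

/* Hypothetical illustration of the flow arm_memblock_init() implements. */
static void __init example_memblock_bringup(struct meminfo *mi)
{
        int i;

        memblock_init();                /* start with empty region lists */

        /* 1. Describe every RAM bank handed over by the boot loader. */
        for (i = 0; i < mi->nr_banks; i++)
                memblock_add(mi->bank[i].start, mi->bank[i].size);

        /* 2. Carve out what is already in use before any allocations. */
        memblock_reserve(__pa(_stext), _end - _stext);  /* kernel image */
        arm_mm_memblock_reserve();      /* page tables (MMU) or vector page (nommu),
                                           declared in arch/arm/mm/mm.h below */

        /* 3. Finalise the region lists and dump them for debugging. */
        memblock_analyze();
        memblock_dump_all();
}

Later in boot, arm_bootmem_init() walks memblock.reserved and mirrors each region into bootmem with reserve_bootmem_node(), so whatever is reserved at this stage automatically stays out of the bootmem free lists.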
@@ -10,6 +10,7 @@ config ARM
 	default y
 	select HAVE_AOUT
 	select HAVE_IDE
+	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
 	select GENERIC_ATOMIC64 if (!CPU_32v6K)
+#ifndef _ASM_ARM_MEMBLOCK_H
+#define _ASM_ARM_MEMBLOCK_H
+
+#ifdef CONFIG_MMU
+extern phys_addr_t lowmem_end_addr;
+#define MEMBLOCK_REAL_LIMIT	lowmem_end_addr
+#else
+#define MEMBLOCK_REAL_LIMIT	0
+#endif
+
+struct meminfo;
+
+extern void arm_memblock_init(struct meminfo *);
+
+#endif
@@ -25,6 +25,7 @@
 #include <linux/smp.h>
 #include <linux/fs.h>
 #include <linux/proc_fs.h>
+#include <linux/memblock.h>
 
 #include <asm/unified.h>
 #include <asm/cpu.h>
@@ -715,6 +716,8 @@ void __init setup_arch(char **cmdline_p)
 	parse_early_param();
 
+	arm_memblock_init(&meminfo);
+
 	paging_init(mdesc);
 	request_standard_resources(&meminfo, mdesc);
@@ -17,6 +17,7 @@
 #include <linux/initrd.h>
 #include <linux/highmem.h>
 #include <linux/gfp.h>
+#include <linux/memblock.h>
 
 #include <asm/mach-types.h>
 #include <asm/sections.h>
@@ -146,95 +147,21 @@ static void __init find_limits(struct meminfo *mi,
 	}
 }
 
-/*
- * FIXME: We really want to avoid allocating the bootmap bitmap
- * over the top of the initrd. Hopefully, this is located towards
- * the start of a bank, so if we allocate the bootmap bitmap at
- * the end, we won't clash.
- */
-static unsigned int __init
-find_bootmap_pfn(struct meminfo *mi, unsigned int bootmap_pages)
-{
-	unsigned int start_pfn, i, bootmap_pfn;
-
-	start_pfn = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT;
-	bootmap_pfn = 0;
-
-	for_each_bank(i, mi) {
-		struct membank *bank = &mi->bank[i];
-		unsigned int start, end;
-
-		start = bank_pfn_start(bank);
-		end = bank_pfn_end(bank);
-
-		if (end < start_pfn)
-			continue;
-
-		if (start < start_pfn)
-			start = start_pfn;
-
-		if (end <= start)
-			continue;
-
-		if (end - start >= bootmap_pages) {
-			bootmap_pfn = start;
-			break;
-		}
-	}
-
-	if (bootmap_pfn == 0)
-		BUG();
-
-	return bootmap_pfn;
-}
-
-static int __init check_initrd(struct meminfo *mi)
-{
-	int initrd = -2;
-#ifdef CONFIG_BLK_DEV_INITRD
-	unsigned long end = phys_initrd_start + phys_initrd_size;
-
-	/*
-	 * Make sure that the initrd is within a valid area of
-	 * memory.
-	 */
-	if (phys_initrd_size) {
-		unsigned int i;
-
-		initrd = -1;
-		for (i = 0; i < mi->nr_banks; i++) {
-			struct membank *bank = &mi->bank[i];
-			if (bank_phys_start(bank) <= phys_initrd_start &&
-			    end <= bank_phys_end(bank))
-				initrd = 0;
-		}
-	}
-
-	if (initrd == -1) {
-		printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond "
-		       "physical memory - disabling initrd\n",
-		       phys_initrd_start, phys_initrd_size);
-		phys_initrd_start = phys_initrd_size = 0;
-	}
-#endif
-
-	return initrd;
-}
-
 static void __init arm_bootmem_init(struct meminfo *mi,
 	unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned long boot_pfn;
 	unsigned int boot_pages;
+	phys_addr_t bitmap;
 	pg_data_t *pgdat;
 	int i;
 
 	/*
-	 * Allocate the bootmem bitmap page.
+	 * Allocate the bootmem bitmap page.  This must be in a region
+	 * of memory which has already been mapped.
 	 */
 	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-	boot_pfn = find_bootmap_pfn(mi, boot_pages);
+	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
+				__pfn_to_phys(end_pfn));
 
 	/*
 	 * Initialise the bootmem allocator, handing the
@@ -242,7 +169,7 @@ static void __init arm_bootmem_init(struct meminfo *mi,
 	 */
 	node_set_online(0);
 	pgdat = NODE_DATA(0);
-	init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);
+	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
 
 	for_each_bank(i, mi) {
 		struct membank *bank = &mi->bank[i];
@@ -251,30 +178,16 @@ static void __init arm_bootmem_init(struct meminfo *mi,
 	}
 
 	/*
-	 * Reserve the bootmem bitmap.
+	 * Reserve the memblock reserved regions in bootmem.
 	 */
-	reserve_bootmem(boot_pfn << PAGE_SHIFT,
-			boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
-}
-
-static void __init bootmem_reserve_initrd(void)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
-	int res;
-
-	res = reserve_bootmem(phys_initrd_start,
-			      phys_initrd_size, BOOTMEM_EXCLUSIVE);
-
-	if (res == 0) {
-		initrd_start = __phys_to_virt(phys_initrd_start);
-		initrd_end = initrd_start + phys_initrd_size;
-	} else {
-		printk(KERN_ERR
-			"INITRD: 0x%08lx+0x%08lx overlaps in-use "
-			"memory region - disabling initrd\n",
-			phys_initrd_start, phys_initrd_size);
+	for (i = 0; i < memblock.reserved.cnt; i++) {
+		phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
+		if (start >= start_pfn &&
+		    memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
+			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
+				memblock_size_bytes(&memblock.reserved, i),
+				BOOTMEM_DEFAULT);
 	}
-#endif
 }
 
 static void __init arm_bootmem_free(struct meminfo *mi
@@ -358,16 +271,40 @@ static void arm_memory_present(struct meminfo *mi)
 }
 #endif
 
+void __init arm_memblock_init(struct meminfo *mi)
+{
+	int i;
+
+	memblock_init();
+	for (i = 0; i < mi->nr_banks; i++)
+		memblock_add(mi->bank[i].start, mi->bank[i].size);
+
+	/* Register the kernel text, kernel data and initrd with memblock. */
+#ifdef CONFIG_XIP_KERNEL
+	memblock_reserve(__pa(_data), _end - _data);
+#else
+	memblock_reserve(__pa(_stext), _end - _stext);
+#endif
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (phys_initrd_size) {
+		memblock_reserve(phys_initrd_start, phys_initrd_size);
+
+		/* Now convert initrd to virtual addresses */
+		initrd_start = __phys_to_virt(phys_initrd_start);
+		initrd_end = initrd_start + phys_initrd_size;
+	}
+#endif
+
+	arm_mm_memblock_reserve();
+
+	memblock_analyze();
+	memblock_dump_all();
+}
+
 void __init bootmem_init(struct machine_desc *mdesc)
 {
 	struct meminfo *mi = &meminfo;
 	unsigned long min, max_low, max_high;
-	int initrd;
-
-	/*
-	 * Locate the ramdisk image, if any.
-	 */
-	initrd = check_initrd(mi);
 
 	max_low = max_high = 0;
@@ -375,20 +312,9 @@ void __init bootmem_init(struct machine_desc *mdesc)
 	arm_bootmem_init(mi, min, max_low);
 
-	/*
-	 * Reserve any special regions.
-	 */
-	reserve_special_regions();
-
 	if (mdesc->reserve)
 		mdesc->reserve();
 
-	/*
-	 * If the initrd is present, reserve its memory.
-	 */
-	if (initrd == 0)
-		bootmem_reserve_initrd();
-
 	/*
 	 * Sparsemem tries to allocate bootmem in memory_present(),
 	 * so must be done after the fixed reservations
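The handover loop above relies on the memblock layout of this era: alongside memblock.reserved (filled by memblock_reserve()) there is a parallel memblock.memory list for RAM added with memblock_add(), each carrying a cnt field and per-index accessors. A hedged sketch of walking the reserved list with the same accessors used in arm_bootmem_init(); example_dump_reserved() is a hypothetical debugging helper, not part of the patch.

#include <linux/kernel.h>
#include <linux/memblock.h>

/* Hypothetical helper: print every reserved region, using the same
 * accessors as the arm_bootmem_init() loop above. */
static void __init example_dump_reserved(void)
{
        int i;

        for (i = 0; i < memblock.reserved.cnt; i++)
                pr_info("reserved %d: pfn %llx-%llx, %llu bytes\n", i,
                        (unsigned long long)memblock_start_pfn(&memblock.reserved, i),
                        (unsigned long long)memblock_end_pfn(&memblock.reserved, i),
                        (unsigned long long)memblock_size_bytes(&memblock.reserved, i));
}

The patch itself ends arm_memblock_init() with memblock_dump_all(), which prints both lists when memblock debugging is enabled.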
@@ -30,4 +30,4 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 struct machine_desc;
 void __init bootmem_init(struct machine_desc *);
-void reserve_special_regions(void);
+void arm_mm_memblock_reserve(void);
@@ -11,9 +11,9 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
 #include <linux/mman.h>
 #include <linux/nodemask.h>
+#include <linux/memblock.h>
 #include <linux/sort.h>
 
 #include <asm/cputype.h>
@@ -489,7 +489,9 @@ static void __init build_mem_type_table(void)
 
 static void __init *early_alloc(unsigned long sz)
 {
-	return alloc_bootmem_low_pages(sz);
+	void *ptr = __va(memblock_alloc(sz, sz));
+	memset(ptr, 0, sz);
+	return ptr;
 }
 
 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
@@ -705,10 +707,14 @@ static int __init early_vmalloc(char *arg)
 }
 early_param("vmalloc", early_vmalloc);
 
+phys_addr_t lowmem_end_addr;
+
 static void __init sanity_check_meminfo(void)
 {
 	int i, j, highmem = 0;
 
+	lowmem_end_addr = __pa(vmalloc_min - 1) + 1;
+
 	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 		struct membank *bank = &meminfo.bank[j];
 		*bank = meminfo.bank[i];
@@ -834,34 +840,22 @@ static inline void prepare_page_table(void)
 }
 
 /*
- * Reserve the various regions
+ * Reserve the special regions of memory
  */
-void __init reserve_special_regions(void)
+void __init arm_mm_memblock_reserve(void)
 {
-	/*
-	 * Register the kernel text and data with bootmem.
-	 * Note that this can only be in node 0.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	reserve_bootmem(__pa(_data), _end - _data, BOOTMEM_DEFAULT);
-#else
-	reserve_bootmem(__pa(_stext), _end - _stext, BOOTMEM_DEFAULT);
-#endif
-
 	/*
 	 * Reserve the page tables.  These are already in use,
 	 * and can only be in node 0.
 	 */
-	reserve_bootmem(__pa(swapper_pg_dir),
-			PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT);
+	memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));
 
 #ifdef CONFIG_SA1111
 	/*
 	 * Because of the SA1111 DMA bug, we want to preserve our
 	 * precious DMA-able memory...
 	 */
-	reserve_bootmem(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET,
-			BOOTMEM_DEFAULT);
+	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
 #endif
 }
@@ -1004,7 +998,6 @@ void __init paging_init(struct machine_desc *mdesc)
 	sanity_check_meminfo();
 	prepare_page_table();
 	map_lowmem();
-	bootmem_init(mdesc);
 	devicemaps_init(mdesc);
 	kmap_init();
@@ -1012,6 +1005,9 @@ void __init paging_init(struct machine_desc *mdesc)
 	/* allocate the zero page. */
 	zero_page = early_alloc(PAGE_SIZE);
+
+	bootmem_init(mdesc);
+
 	empty_zero_page = virt_to_page(zero_page);
 	__flush_dcache_page(NULL, empty_zero_page);
 }
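The early_alloc() rewrite above is the allocation pattern that replaces alloc_bootmem_low_pages() for early page tables: on this kernel, memblock_alloc(size, align) returns a physical address and honours MEMBLOCK_REAL_LIMIT, which the new asm/memblock.h points at lowmem_end_addr, so the result sits in the direct-mapped region and can be converted with __va(); it must then be zeroed by hand, since memblock, unlike bootmem, does not clear what it hands out. A hedged restatement of that pattern; early_zalloc_example() is a hypothetical name and assumes the old physical-address-returning memblock_alloc().

#include <linux/memblock.h>
#include <linux/string.h>

/* Hypothetical helper mirroring early_alloc() above: allocate below the
 * direct-mapped limit, convert phys -> virt, and zero the block. */
static void * __init early_zalloc_example(unsigned long sz)
{
        void *ptr = __va(memblock_alloc(sz, sz));       /* align to its own size */

        memset(ptr, 0, sz);     /* memblock, unlike bootmem, does not zero */
        return ptr;
}

Aligning each block to its own size mirrors what the patch does and keeps page-table-sized allocations naturally page aligned.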
@@ -6,8 +6,8 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
-#include <linux/bootmem.h>
 #include <linux/io.h>
+#include <linux/memblock.h>
 
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
@@ -17,27 +17,14 @@
 #include "mm.h"
 
-/*
- * Reserve the various regions
- */
-void __init reserve_special_regions(void)
+void __init arm_mm_memblock_reserve(void)
 {
-	/*
-	 * Register the kernel text and data with bootmem.
-	 * Note that this can only be in node 0.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	reserve_bootmem(__pa(_data), _end - _data, BOOTMEM_DEFAULT);
-#else
-	reserve_bootmem(__pa(_stext), _end - _stext, BOOTMEM_DEFAULT);
-#endif
-
 	/*
 	 * Register the exception vector page.
 	 * some architectures which the DRAM is the exception vector to trap,
 	 * alloc_page breaks with error, although it is not NULL, but "0."
 	 */
-	reserve_bootmem(CONFIG_VECTORS_BASE, PAGE_SIZE, BOOTMEM_DEFAULT);
+	memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
 }
 
 /*