Commit 580e0ad2 authored by Yinghai Lu, committed by H. Peter Anvin

core: Move early_res from arch/x86 to kernel/

This makes the range reservation feature available to other
architectures.

-v2: add get_max_mapped, max_pfn_mapped only defined in x86...
     to fix PPC compiling
-v3: according to hpa, add CONFIG_HAVE_EARLY_RES
-v4: fix typo about EARLY_RES in config
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <4B7B5723.4070009@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Parent dd645cee
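Since the patch drops the __weak find_fw_memmap_area() fallback from early_res.c and stops reading the x86-only max_pfn_mapped directly, an architecture that turns on HAVE_EARLY_RES is expected to supply the two hooks declared in <linux/early_res.h>, just as x86 keeps doing in e820.c. The sketch below is only an illustration of what such a port might look like; the myarch_* symbols are hypothetical placeholders and are not part of this commit.

	#include <linux/types.h>
	#include <linux/init.h>
	#include <linux/early_res.h>
	#include <asm/page.h>

	/* hypothetical per-arch bookkeeping, playing the role of x86's max_pfn_mapped */
	extern unsigned long myarch_max_mapped_pfn;

	/* hypothetical walker over this architecture's firmware memory map */
	extern u64 myarch_find_free_area(u64 start, u64 end, u64 size, u64 align);

	/* required: the __weak default no longer exists in kernel/early_res.c */
	u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align)
	{
		return myarch_find_free_area(start, end, size, align);
	}

	/* required: early_res.c now asks the arch for the top of the mapped range */
	u64 __init get_max_mapped(void)
	{
		return (u64)myarch_max_mapped_pfn << PAGE_SHIFT;
	}

Alongside that, the architecture's Kconfig would provide HAVE_EARLY_RES, which is what makes kernel/Makefile build early_res.o, as the hunks below show.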
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -183,6 +183,9 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	def_bool y
 
+config HAVE_EARLY_RES
+	def_bool y
+
 config HAVE_INTEL_TXT
 	def_bool y
 	depends on EXPERIMENTAL && DMAR && ACPI

--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -112,7 +112,7 @@ extern unsigned long end_user_pfn;
 extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
 extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
 extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
-#include <asm/early_res.h>
+#include <linux/early_res.h>
 
 extern unsigned long e820_end_of_ram_pfn(void);
 extern unsigned long e820_end_of_low_ram_pfn(void);

--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -38,7 +38,7 @@ obj-$(CONFIG_X86_32)	+= probe_roms_32.o
 obj-$(CONFIG_X86_32)	+= sys_i386_32.o i386_ksyms_32.o
 obj-$(CONFIG_X86_64)	+= sys_x86_64.o x8664_ksyms_64.o
 obj-$(CONFIG_X86_64)	+= syscall_64.o vsyscall_64.o
-obj-y			+= bootflag.o e820.o early_res.o
+obj-y			+= bootflag.o e820.o
 obj-y			+= pci-dma.o quirks.o i8237.o topology.o kdebugfs.o
 obj-y			+= alternative.o i8253.o pci-nommu.o hw_breakpoint.o
 obj-y			+= tsc.o io_delay.o rtc.o

--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -17,7 +17,6 @@
 #include <linux/firmware-map.h>
 
 #include <asm/e820.h>
-#include <asm/early_res.h>
 #include <asm/proto.h>
 #include <asm/setup.h>
 
@@ -752,6 +751,15 @@ u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align)
 {
 	return find_e820_area(start, end, size, align);
 }
+
+u64 __init get_max_mapped(void)
+{
+	u64 end = max_pfn_mapped;
+
+	end <<= PAGE_SHIFT;
+
+	return end;
+}
 /*
  * Find next free range after *start
  */

--- a/arch/x86/include/asm/early_res.h
+++ b/include/linux/early_res.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_X86_EARLY_RES_H
-#define _ASM_X86_EARLY_RES_H
+#ifndef _LINUX_EARLY_RES_H
+#define _LINUX_EARLY_RES_H
 #ifdef __KERNEL__
 
 extern void reserve_early(u64 start, u64 end, char *name);
@@ -13,9 +13,10 @@ u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
 u64 find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
 			 u64 *sizep, u64 align);
 u64 find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align);
+u64 get_max_mapped(void);
 #include <linux/range.h>
 int get_free_all_memory_range(struct range **rangep, int nodeid);
 
 #endif /* __KERNEL__ */
-#endif /* _ASM_X86_EARLY_RES_H */
+#endif /* _LINUX_EARLY_RES_H */

--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -11,6 +11,7 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o \
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
 	    notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
 	    async.o range.o
+obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o
 obj-y += groups.o
 
 ifdef CONFIG_FUNCTION_TRACER

--- a/arch/x86/kernel/early_res.c
+++ b/kernel/early_res.c
@@ -6,8 +6,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/mm.h>
-
-#include <asm/early_res.h>
+#include <linux/early_res.h>
 
 /*
  * Early reserved memory areas.
@@ -178,13 +177,6 @@ void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
 	__reserve_early(start, end, name, 1);
 }
 
-u64 __init __weak find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align)
-{
-	panic("should have find_fw_memmap_area defined with arch");
-
-	return -1ULL;
-}
-
 static void __init __check_and_double_early_res(u64 ex_start, u64 ex_end)
 {
 	u64 start, end, size, mem;
@@ -207,7 +199,7 @@ static void __init __check_and_double_early_res(u64 ex_start, u64 ex_end)
 			       sizeof(struct early_res));
 	if (mem == -1ULL) {
 		start = ex_end;
-		end = max_pfn_mapped << PAGE_SHIFT;
+		end = get_max_mapped();
 		if (start + size < end)
 			mem = find_fw_memmap_area(start, end, size,
 						 sizeof(struct early_res));
@@ -343,11 +335,11 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid)
 	count *= 2;
 
 	size = sizeof(struct range) * count;
+	end = get_max_mapped();
 #ifdef MAX_DMA32_PFN
-	if (max_pfn_mapped > MAX_DMA32_PFN)
+	if (end > (MAX_DMA32_PFN << PAGE_SHIFT))
 		start = MAX_DMA32_PFN << PAGE_SHIFT;
 #endif
-	end = max_pfn_mapped << PAGE_SHIFT;
 	mem = find_fw_memmap_area(start, end, size, sizeof(struct range));
 	if (mem == -1ULL)
 		panic("can not find more space for range free");