Commit 03787ff6 authored by Linus Torvalds

Merge tag 'xtensa-next-20140503' of git://github.com/czankel/xtensa-linux

Pull Xtensa fixes from Chris Zankel:
 - Fixes allmodconfig, allnoconfig builds
 - Adds highmem support
 - Enables build-time exception table sorting.

* tag 'xtensa-next-20140503' of git://github.com/czankel/xtensa-linux:
  xtensa: ISS: don't depend on CONFIG_TTY
  xtensa: xt2000: drop redundant sysmem initialization
  xtensa: add support for KC705
  xtensa: xtfpga: introduce SoC I/O bus
  xtensa: add HIGHMEM support
  xtensa: optimize local_flush_tlb_kernel_range
  xtensa: dump sysmem from the bootmem_init
  xtensa: handle memmap kernel option
  xtensa: keep sysmem banks ordered in mem_reserve
  xtensa: keep sysmem banks ordered in add_sysmem_bank
  xtensa: split bootparam and kernel meminfo
  xtensa: enable sorting extable at build time
  xtensa: export __{invalidate,flush}_dcache_range
  xtensa: Export __invalidate_icache_range
@@ -14,6 +14,7 @@ config XTENSA
select GENERIC_PCI_IOMAP
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_WANT_OPTIONAL_GPIOLIB
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select IRQ_DOMAIN
select HAVE_OPROFILE
@@ -189,6 +190,24 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
If in doubt, say Y.
config HIGHMEM
bool "High Memory Support"
help
Linux can use the full amount of RAM in the system by
default. However, the default MMUv2 setup only maps the
lowermost 128 MB of memory linearly to the areas starting
at 0xd0000000 (cached) and 0xd8000000 (uncached).
When there are more than 128 MB memory in the system not
all of it can be "permanently mapped" by the kernel.
The physical memory that's not permanently mapped is called
"high memory".
If you are compiling a kernel which will never run on a
machine with more than 128 MB total physical RAM, answer
N here.
If unsure, say Y.
endmenu
config XTENSA_CALIBRATE_CCOUNT
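
The HIGHMEM help text above boils down to a simple address-range check: with the default MMUv2 setup, only the first 128 MB of RAM sits inside the cached linear window at 0xd0000000. A minimal sketch of that arithmetic, assuming RAM starts at physical address 0 (the macro and function names here are invented for the sketch, not taken from the patch):

```c
/* Illustrative only: the 128 MB lowmem limit described in the HIGHMEM help
 * text above. Names are invented; RAM is assumed to start at physical 0. */
#include <stdbool.h>

#define KSEG_CACHED_BASE  0xd0000000UL   /* cached linear window   */
#define KSEG_SIZE         0x08000000UL   /* 128 MB mapped linearly */

/* Physical pages below 128 MB are permanently mapped ("lowmem")... */
static bool phys_is_lowmem(unsigned long paddr)
{
	return paddr < KSEG_SIZE;
}

/* ...and their cached virtual address is a fixed offset away. Anything
 * above 128 MB is highmem and must go through kmap()/kmap_atomic(). */
static unsigned long phys_to_cached_virt(unsigned long paddr)
{
	return KSEG_CACHED_BASE + paddr;
}
```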
@@ -224,7 +243,6 @@ choice
config XTENSA_PLATFORM_ISS
bool "ISS"
depends on TTY
select XTENSA_CALIBRATE_CCOUNT
select SERIAL_CONSOLE
help
......
/dts-v1/;
/include/ "xtfpga.dtsi"
/include/ "xtfpga-flash-128m.dtsi"
/ {
compatible = "cdns,xtensa-kc705";
memory@0 {
device_type = "memory";
reg = <0x00000000 0x08000000>;
};
};
/ {
soc {
flash: flash@00000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
reg = <0x00000000 0x08000000>;
bank-width = <2>;
device-width = <2>;
partition@0x0 {
label = "data";
reg = <0x00000000 0x06000000>;
};
partition@0x6000000 {
label = "boot loader area";
reg = <0x06000000 0x00800000>;
};
partition@0x6800000 {
label = "kernel image";
reg = <0x06800000 0x017e0000>;
};
partition@0x7fe0000 {
label = "boot environment";
reg = <0x07fe0000 0x00020000>;
};
};
};
};
/ {
flash: flash@f8000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
reg = <0xf8000000 0x01000000>;
bank-width = <2>;
device-width = <2>;
partition@0x0 {
label = "boot loader area";
reg = <0x00000000 0x00400000>;
soc {
flash: flash@08000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
reg = <0x08000000 0x01000000>;
bank-width = <2>;
device-width = <2>;
partition@0x0 {
label = "boot loader area";
reg = <0x00000000 0x00400000>;
};
partition@0x400000 {
label = "kernel image";
reg = <0x00400000 0x00600000>;
};
partition@0xa00000 {
label = "data";
reg = <0x00a00000 0x005e0000>;
};
partition@0xfe0000 {
label = "boot environment";
reg = <0x00fe0000 0x00020000>;
};
};
partition@0x400000 {
label = "kernel image";
reg = <0x00400000 0x00600000>;
};
partition@0xa00000 {
label = "data";
reg = <0x00a00000 0x005e0000>;
};
partition@0xfe0000 {
label = "boot environment";
reg = <0x00fe0000 0x00020000>;
};
};
};
};
/ {
flash: flash@f8000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
reg = <0xf8000000 0x00400000>;
bank-width = <2>;
device-width = <2>;
partition@0x0 {
label = "boot loader area";
reg = <0x00000000 0x003f0000>;
soc {
flash: flash@08000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
reg = <0x08000000 0x00400000>;
bank-width = <2>;
device-width = <2>;
partition@0x0 {
label = "boot loader area";
reg = <0x00000000 0x003f0000>;
};
partition@0x3f0000 {
label = "boot environment";
reg = <0x003f0000 0x00010000>;
};
};
partition@0x3f0000 {
label = "boot environment";
reg = <0x003f0000 0x00010000>;
};
};
};
};
@@ -42,21 +42,28 @@
};
};
serial0: serial@fd050020 {
device_type = "serial";
compatible = "ns16550a";
no-loopback-test;
reg = <0xfd050020 0x20>;
reg-shift = <2>;
interrupts = <0 1>; /* external irq 0 */
clocks = <&osc>;
};
soc {
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-bus";
ranges = <0x00000000 0xf0000000 0x10000000>;
enet0: ethoc@fd030000 {
compatible = "opencores,ethoc";
reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
interrupts = <1 1>; /* external irq 1 */
local-mac-address = [00 50 c2 13 6f 00];
clocks = <&osc>;
serial0: serial@0d050020 {
device_type = "serial";
compatible = "ns16550a";
no-loopback-test;
reg = <0x0d050020 0x20>;
reg-shift = <2>;
interrupts = <0 1>; /* external irq 0 */
clocks = <&osc>;
};
enet0: ethoc@0d030000 {
compatible = "opencores,ethoc";
reg = <0x0d030000 0x4000 0x0d800000 0x4000>;
interrupts = <1 1>; /* external irq 1 */
local-mac-address = [00 50 c2 13 6f 00];
clocks = <&osc>;
};
};
};
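
In the xtfpga.dtsi hunk above, the new simple-bus node carries ranges = <0x00000000 0xf0000000 0x10000000>, so a child address such as 0x0d050020 (serial0) resolves to the CPU address 0xfd050020 that the old flat nodes used. A small sketch of that translation (illustrative helper, not part of the patch):

```c
/* Sketch of the one-cell ranges translation used by the xtfpga "soc" node:
 * <child-base parent-base size> = <0x00000000 0xf0000000 0x10000000>.
 * Returns the CPU address for a child (bus) address, or 0 if out of range. */
static unsigned long xtfpga_soc_to_cpu(unsigned long child_addr)
{
	const unsigned long child_base  = 0x00000000;
	const unsigned long parent_base = 0xf0000000;
	const unsigned long size        = 0x10000000;

	if (child_addr - child_base >= size)
		return 0;
	return parent_base + (child_addr - child_base);
}
/* xtfpga_soc_to_cpu(0x0d050020) == 0xfd050020, the old serial0 address. */
```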
@@ -37,23 +37,14 @@ typedef struct bp_tag {
unsigned long data[0]; /* data */
} bp_tag_t;
typedef struct meminfo {
struct bp_meminfo {
unsigned long type;
unsigned long start;
unsigned long end;
} meminfo_t;
#define SYSMEM_BANKS_MAX 5
};
#define MEMORY_TYPE_CONVENTIONAL 0x1000
#define MEMORY_TYPE_NONE 0x2000
typedef struct sysmem_info {
int nr_banks;
meminfo_t bank[SYSMEM_BANKS_MAX];
} sysmem_info_t;
extern sysmem_info_t sysmem;
#endif
#endif
/*
* fixmap.h: compile-time virtual memory allocation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
*
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
*/
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
#include <asm/pgtable.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
#endif
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the end of the consistent memory region backwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* these 'compile-time allocated' memory buffers are
* fixed-size 4k pages. (or larger if used with an increment
* higher than 1) use fixmap_set(idx,phys) to associate
* physical memory with fixmap indices.
*/
enum fixed_addresses {
#ifdef CONFIG_HIGHMEM
/* reserved pte's for temporary kernel mappings */
FIX_KMAP_BEGIN,
FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
#endif
__end_of_fixed_addresses
};
#define FIXADDR_TOP (VMALLOC_START - PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
#include <asm-generic/fixmap.h>
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel( \
pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
(vaddr) \
)
#endif
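
The FIX_KMAP_* slots defined above are handed out per CPU and per kmap type, so the virtual address of a slot follows directly from __fix_to_virt(). A sketch of that calculation, mirroring how highmem.c uses it further down (the helper name is invented; asm/fixmap.h and asm/kmap_types.h are assumed to be included):

```c
/* Sketch: virtual address of the kmap_atomic fixmap slot for a given CPU
 * and kmap type, matching the FIX_KMAP_BEGIN/FIX_KMAP_END layout above. */
static inline unsigned long kmap_slot_vaddr(int cpu, int type)
{
	return __fix_to_virt(FIX_KMAP_BEGIN + type + KM_TYPE_NR * cpu);
}
```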
@@ -6,11 +6,54 @@
* this archive for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
* Copyright (C) 2014 Cadence Design Systems Inc.
*/
#ifndef _XTENSA_HIGHMEM_H
#define _XTENSA_HIGHMEM_H
extern void flush_cache_kmaps(void);
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>
#define PKMAP_BASE (FIXADDR_START - PMD_SIZE)
#define LAST_PKMAP PTRS_PER_PTE
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
#define kmap_prot PAGE_KERNEL
extern pte_t *pkmap_page_table;
void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
static inline void *kmap(struct page *page)
{
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return page_address(page);
return kmap_high(page);
}
static inline void kunmap(struct page *page)
{
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return;
kunmap_high(page);
}
static inline void flush_cache_kmaps(void)
{
flush_cache_all();
}
void *kmap_atomic(struct page *page);
void __kunmap_atomic(void *kvaddr);
void kmap_init(void);
#endif
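
The kmap()/kunmap() pair above gives a sleeping context a persistent mapping of a possibly-high page. A minimal usage sketch for a hypothetical caller that already holds a struct page:

```c
#include <linux/highmem.h>
#include <linux/string.h>

/* Usage sketch (hypothetical caller): copy data out of a page that may live
 * in highmem. kmap() may sleep, so this must not run in atomic context. */
static void copy_from_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap(page);   /* falls back to page_address() for lowmem */

	memcpy(dst, src, len);
	kunmap(page);             /* no-op for lowmem pages */
}
```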
@@ -310,6 +310,10 @@ set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
update_pte(ptep, pteval);
}
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
update_pte(ptep, pteval);
}
static inline void
set_pmd(pmd_t *pmdp, pmd_t pmdval)
......
/*
* sysmem-related prototypes.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2014 Cadence Design Systems Inc.
*/
#ifndef _XTENSA_SYSMEM_H
#define _XTENSA_SYSMEM_H
#define SYSMEM_BANKS_MAX 31
struct meminfo {
unsigned long start;
unsigned long end;
};
/*
* Bank array is sorted by .start.
* Banks don't overlap and there's at least one page gap
* between adjacent bank entries.
*/
struct sysmem_info {
int nr_banks;
struct meminfo bank[SYSMEM_BANKS_MAX];
};
extern struct sysmem_info sysmem;
int add_sysmem_bank(unsigned long start, unsigned long end);
int mem_reserve(unsigned long, unsigned long, int);
void bootmem_init(void);
void zones_init(void);
#endif /* _XTENSA_SYSMEM_H */
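
The comment above states the sysmem invariants: banks sorted by .start, non-overlapping, with at least one page gap between neighbours. A small sanity-check sketch of those invariants (illustrative only; no such helper exists in the patch):

```c
#include <asm/page.h>
#include <asm/sysmem.h>

/* Sketch: verify the sysmem bank invariants documented above. */
static bool sysmem_banks_sane(const struct sysmem_info *si)
{
	int i;

	for (i = 0; i < si->nr_banks; i++) {
		if (si->bank[i].start >= si->bank[i].end)
			return false;   /* empty or inverted bank */
		if (i && si->bank[i].start < si->bank[i - 1].end + PAGE_SIZE)
			return false;   /* overlap or missing page gap */
	}
	return true;
}
```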
@@ -36,6 +36,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long page);
void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifdef CONFIG_SMP
@@ -44,12 +45,7 @@ void flush_tlb_mm(struct mm_struct *);
void flush_tlb_page(struct vm_area_struct *, unsigned long);
void flush_tlb_range(struct vm_area_struct *, unsigned long,
unsigned long);
static inline void flush_tlb_kernel_range(unsigned long start,
unsigned long end)
{
flush_tlb_all();
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#else /* !CONFIG_SMP */
@@ -58,7 +54,8 @@ static inline void flush_tlb_kernel_range(unsigned long start,
#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, \
end)
#define flush_tlb_kernel_range(start, end) local_flush_tlb_all()
#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \
end)
#endif /* CONFIG_SMP */
......
@@ -50,6 +50,7 @@
#include <asm/param.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/sysmem.h>
#include <platform/hardware.h>
@@ -88,12 +89,6 @@ static char __initdata command_line[COMMAND_LINE_SIZE];
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
#endif
sysmem_info_t __initdata sysmem;
extern int mem_reserve(unsigned long, unsigned long, int);
extern void bootmem_init(void);
extern void zones_init(void);
/*
* Boot parameter parsing.
*
@@ -113,31 +108,14 @@ typedef struct tagtable {
/* parse current tag */
static int __init add_sysmem_bank(unsigned long type, unsigned long start,
unsigned long end)
{
if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) {
printk(KERN_WARNING
"Ignoring memory bank 0x%08lx size %ldKB\n",
start, end - start);
return -EINVAL;
}
sysmem.bank[sysmem.nr_banks].type = type;
sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(start);
sysmem.bank[sysmem.nr_banks].end = end & PAGE_MASK;
sysmem.nr_banks++;
return 0;
}
static int __init parse_tag_mem(const bp_tag_t *tag)
{
meminfo_t *mi = (meminfo_t *)(tag->data);
struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
if (mi->type != MEMORY_TYPE_CONVENTIONAL)
return -1;
return add_sysmem_bank(mi->type, mi->start, mi->end);
return add_sysmem_bank(mi->start, mi->end);
}
__tagtable(BP_TAG_MEMORY, parse_tag_mem);
@@ -146,8 +124,8 @@ __tagtable(BP_TAG_MEMORY, parse_tag_mem);
static int __init parse_tag_initrd(const bp_tag_t* tag)
{
meminfo_t* mi;
mi = (meminfo_t*)(tag->data);
struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
initrd_start = (unsigned long)__va(mi->start);
initrd_end = (unsigned long)__va(mi->end);
@@ -255,7 +233,7 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
return;
size &= PAGE_MASK;
add_sysmem_bank(MEMORY_TYPE_CONVENTIONAL, base, base + size);
add_sysmem_bank(base, base + size);
}
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
@@ -292,8 +270,6 @@ device_initcall(xtensa_device_probe);
void __init init_arch(bp_tag_t *bp_start)
{
sysmem.nr_banks = 0;
/* Parse boot parameters */
if (bp_start)
@@ -304,10 +280,9 @@ void __init init_arch(bp_tag_t *bp_start)
#endif
if (sysmem.nr_banks == 0) {
sysmem.nr_banks = 1;
sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
+ PLATFORM_DEFAULT_MEM_SIZE;
add_sysmem_bank(PLATFORM_DEFAULT_MEM_START,
PLATFORM_DEFAULT_MEM_START +
PLATFORM_DEFAULT_MEM_SIZE);
}
#ifdef CONFIG_CMDLINE_BOOL
@@ -487,7 +462,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start < initrd_end) {
initrd_is_mapped = mem_reserve(__pa(initrd_start),
__pa(initrd_end), 0);
__pa(initrd_end), 0) == 0;
initrd_below_start_ok = 1;
} else {
initrd_start = 0;
@@ -532,6 +507,7 @@ void __init setup_arch(char **cmdline_p)
__pa(&_Level6InterruptVector_text_end), 0);
#endif
parse_early_param();
bootmem_init();
unflatten_and_copy_device_tree();
......
@@ -496,6 +496,21 @@ void flush_tlb_range(struct vm_area_struct *vma,
on_each_cpu(ipi_flush_tlb_range, &fd, 1);
}
static void ipi_flush_tlb_kernel_range(void *arg)
{
struct flush_data *fd = arg;
local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
struct flush_data fd = {
.addr1 = start,
.addr2 = end,
};
on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
}
/* Cache flush functions */
static void ipi_flush_cache_all(void *arg)
......
@@ -20,6 +20,7 @@
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <asm/dma.h>
#include <asm/io.h>
@@ -105,6 +106,7 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
* Architecture-specific symbols
*/
EXPORT_SYMBOL(__xtensa_copy_user);
EXPORT_SYMBOL(__invalidate_icache_range);
/*
* Kernel hacking ...
@@ -127,3 +129,8 @@ EXPORT_SYMBOL(common_exception_return);
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
#endif
EXPORT_SYMBOL(__invalidate_dcache_range);
#if XCHAL_DCACHE_IS_WRITEBACK
EXPORT_SYMBOL(__flush_dcache_range);
#endif
@@ -4,3 +4,4 @@
obj-y := init.o cache.o misc.o
obj-$(CONFIG_MMU) += fault.o mmu.o tlb.o
obj-$(CONFIG_HIGHMEM) += highmem.o
@@ -59,6 +59,10 @@
*
*/
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM)
#error "HIGHMEM is not supported on cores with aliasing cache."
#endif
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
/*
@@ -179,10 +183,11 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
#else
if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
&& (vma->vm_flags & VM_EXEC) != 0) {
unsigned long paddr = (unsigned long) page_address(page);
unsigned long paddr = (unsigned long)kmap_atomic(page);
__flush_dcache_page(paddr);
__invalidate_icache_page(paddr);
set_bit(PG_arch_1, &page->flags);
kunmap_atomic((void *)paddr);
}
#endif
}
......
/*
* High memory support for Xtensa architecture
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2014 Cadence Design Systems Inc.
*/
#include <linux/export.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>
static pte_t *kmap_pte;
void *kmap_atomic(struct page *page)
{
enum fixed_addresses idx;
unsigned long vaddr;
int type;
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC));
return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
void __kunmap_atomic(void *kvaddr)
{
int idx, type;
if (kvaddr >= (void *)FIXADDR_START &&
kvaddr < (void *)FIXADDR_TOP) {
type = kmap_atomic_idx();
idx = type + KM_TYPE_NR * smp_processor_id();
/*
* Force other mappings to Oops if they'll try to access this
* pte without first remap it. Keeping stale mappings around
* is a bad idea also, in case the page changes cacheability
* attributes or becomes a protected page in a hypervisor.
*/
pte_clear(&init_mm, kvaddr, kmap_pte - idx);
local_flush_tlb_kernel_range((unsigned long)kvaddr,
(unsigned long)kvaddr + PAGE_SIZE);
kmap_atomic_idx_pop();
}
pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
void __init kmap_init(void)
{
unsigned long kmap_vstart;
/* cache the first kmap pte */
kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
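
A short usage sketch of the kmap_atomic()/__kunmap_atomic() pair added above (hypothetical caller; kunmap_atomic() is the generic wrapper around __kunmap_atomic() from linux/highmem.h):

```c
#include <linux/highmem.h>
#include <linux/string.h>

/* Usage sketch: clear a possibly-high page from a non-sleeping context. */
static void clear_any_page(struct page *page)
{
	void *vaddr = kmap_atomic(page);   /* disables pagefaults, picks a fixmap slot */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);              /* clears the pte and flushes the TLB entry */
}
```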
@@ -8,6 +8,7 @@
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
* Copyright (C) 2014 Cadence Design Systems Inc.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
@@ -19,6 +20,7 @@
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
@@ -27,11 +29,133 @@
#include <asm/bootparam.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/sysmem.h>
struct sysmem_info sysmem __initdata;
static void __init sysmem_dump(void)
{
unsigned i;
pr_debug("Sysmem:\n");
for (i = 0; i < sysmem.nr_banks; ++i)
pr_debug(" 0x%08lx - 0x%08lx (%ldK)\n",
sysmem.bank[i].start, sysmem.bank[i].end,
(sysmem.bank[i].end - sysmem.bank[i].start) >> 10);
}
/*
* Find bank with maximal .start such that bank.start <= start
*/
static inline struct meminfo * __init find_bank(unsigned long start)
{
unsigned i;
struct meminfo *it = NULL;
for (i = 0; i < sysmem.nr_banks; ++i)
if (sysmem.bank[i].start <= start)
it = sysmem.bank + i;
else
break;
return it;
}
/*
* Move all memory banks starting at 'from' to a new place at 'to',
* adjust nr_banks accordingly.
* Both 'from' and 'to' must be inside the sysmem.bank.
*
* Returns: 0 (success), -ENOMEM (not enough space in the sysmem.bank).
*/
static int __init move_banks(struct meminfo *to, struct meminfo *from)
{
unsigned n = sysmem.nr_banks - (from - sysmem.bank);
if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX)
return -ENOMEM;
if (to != from)
memmove(to, from, n * sizeof(struct meminfo));
sysmem.nr_banks += to - from;
return 0;
}
/*
* Add new bank to sysmem. Resulting sysmem is the union of bytes of the
* original sysmem and the new bank.
*
* Returns: 0 (success), < 0 (error)
*/
int __init add_sysmem_bank(unsigned long start, unsigned long end)
{
unsigned i;
struct meminfo *it = NULL;
unsigned long sz;
unsigned long bank_sz = 0;
if (start == end ||
(start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) {
pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n",
start, end - start);
return -EINVAL;
}
start = PAGE_ALIGN(start);
end &= PAGE_MASK;
sz = end - start;
it = find_bank(start);
if (it)
bank_sz = it->end - it->start;
if (it && bank_sz >= start - it->start) {
if (end - it->start > bank_sz)
it->end = end;
else
return 0;
} else {
if (!it)
it = sysmem.bank;
else
++it;
if (it - sysmem.bank < sysmem.nr_banks &&
it->start - start <= sz) {
it->start = start;
if (it->end - it->start < sz)
it->end = end;
else
return 0;
} else {
if (move_banks(it + 1, it) < 0) {
pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n",
start, end - start);
return -EINVAL;
}
it->start = start;
it->end = end;
return 0;
}
}
sz = it->end - it->start;
for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i)
if (sysmem.bank[i].start - it->start <= sz) {
if (sz < sysmem.bank[i].end - it->start)
it->end = sysmem.bank[i].end;
} else {
break;
}
move_banks(it + 1, sysmem.bank + i);
return 0;
}
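
To make the merging behaviour of add_sysmem_bank() above concrete, here is a small worked example; the addresses are invented, and the resulting bank lists follow from the code as written:

```c
/*
 * Worked example for add_sysmem_bank() (addresses are illustrative):
 *
 *   add_sysmem_bank(0x00000000, 0x08000000);
 *       banks: [0x00000000, 0x08000000)
 *   add_sysmem_bank(0x10000000, 0x18000000);
 *       banks: [0x00000000, 0x08000000) [0x10000000, 0x18000000)
 *   add_sysmem_bank(0x06000000, 0x12000000);
 *       the new range overlaps both banks, so they collapse into one:
 *       banks: [0x00000000, 0x18000000)
 */
```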
/*
* mem_reserve(start, end, must_exist)
*
* Reserve some memory from the memory pool.
* If must_exist is set and a part of the region being reserved does not exist
* memory map is not altered.
*
* Parameters:
* start Start of region,
@@ -39,53 +163,69 @@
* must_exist Must exist in memory pool.
*
* Returns:
* 0 (memory area couldn't be mapped)
* -1 (success)
* 0 (success)
* < 0 (error)
*/
int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
{
int i;
if (start == end)
return 0;
struct meminfo *it;
struct meminfo *rm = NULL;
unsigned long sz;
unsigned long bank_sz = 0;
start = start & PAGE_MASK;
end = PAGE_ALIGN(end);
sz = end - start;
if (!sz)
return -EINVAL;
for (i = 0; i < sysmem.nr_banks; i++)
if (start < sysmem.bank[i].end
&& end >= sysmem.bank[i].start)
break;
it = find_bank(start);
if (it)
bank_sz = it->end - it->start;
if (i == sysmem.nr_banks) {
if (must_exist)
printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
"not in any region!\n", start, end);
return 0;
if ((!it || end - it->start > bank_sz) && must_exist) {
pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n",
start, end);
return -EINVAL;
}
if (start > sysmem.bank[i].start) {
if (end < sysmem.bank[i].end) {
/* split entry */
if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
panic("meminfo overflow\n");
sysmem.bank[sysmem.nr_banks].start = end;
sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
sysmem.nr_banks++;
if (it && start - it->start < bank_sz) {
if (start == it->start) {
if (end - it->start < bank_sz) {
it->start = end;
return 0;
} else {
rm = it;
}
} else {
it->end = start;
if (end - it->start < bank_sz)
return add_sysmem_bank(end,
it->start + bank_sz);
++it;
}
sysmem.bank[i].end = start;
}
} else if (end < sysmem.bank[i].end) {
sysmem.bank[i].start = end;
if (!it)
it = sysmem.bank;
} else {
/* remove entry */
sysmem.nr_banks--;
sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
for (; it < sysmem.bank + sysmem.nr_banks; ++it) {
if (it->end - start <= sz) {
if (!rm)
rm = it;
} else {
if (it->start - start < sz)
it->start = end;
break;
}
}
return -1;
if (rm)
move_banks(rm, it);
return 0;
}
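
And a matching worked example for mem_reserve(): reserving a hole strictly inside a bank splits it in two (addresses again illustrative, behaviour inferred from the code above):

```c
/*
 * Worked example for mem_reserve(), starting from a single bank
 * [0x00000000, 0x08000000):
 *
 *   mem_reserve(0x06d00000, 0x06e00000, 0);
 *       banks: [0x00000000, 0x06d00000) [0x06e00000, 0x08000000)
 *
 * The right-hand remainder is re-added via add_sysmem_bank(), which is
 * how the bank array stays sorted without a separate fix-up pass.
 */
```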
@@ -99,6 +239,7 @@ void __init bootmem_init(void)
unsigned long bootmap_start, bootmap_size;
int i;
sysmem_dump();
max_low_pfn = max_pfn = 0;
min_low_pfn = ~0;
@@ -156,19 +297,13 @@ void __init bootmem_init(void)
void __init zones_init(void)
{
unsigned long zones_size[MAX_NR_ZONES];
int i;
/* All pages are DMA-able, so we put them all in the DMA zone. */
zones_size[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET;
for (i = 1; i < MAX_NR_ZONES; i++)
zones_size[i] = 0;
unsigned long zones_size[MAX_NR_ZONES] = {
[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET,
#ifdef CONFIG_HIGHMEM
zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
[ZONE_HIGHMEM] = max_pfn - max_low_pfn,
#endif
};
free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
}
@@ -178,16 +313,38 @@ void __init zones_init(void)
void __init mem_init(void)
{
max_mapnr = max_low_pfn - ARCH_PFN_OFFSET;
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
#ifdef CONFIG_HIGHMEM
#error HIGHGMEM not implemented in init.c
unsigned long tmp;
reset_all_zones_managed_pages();
for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
free_highmem_page(pfn_to_page(tmp));
#endif
max_mapnr = max_pfn - ARCH_PFN_OFFSET;
high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
free_all_bootmem();
mem_init_print_info(NULL);
pr_info("virtual kernel memory layout:\n"
#ifdef CONFIG_HIGHMEM
" pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
" fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
#endif
" vmalloc : 0x%08x - 0x%08x (%5u MB)\n"
" lowmem : 0x%08x - 0x%08lx (%5lu MB)\n",
#ifdef CONFIG_HIGHMEM
PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
(LAST_PKMAP*PAGE_SIZE) >> 10,
FIXADDR_START, FIXADDR_TOP,
(FIXADDR_TOP - FIXADDR_START) >> 10,
#endif
VMALLOC_START, VMALLOC_END,
(VMALLOC_END - VMALLOC_START) >> 20,
PAGE_OFFSET, PAGE_OFFSET +
(max_low_pfn - min_low_pfn) * PAGE_SIZE,
((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -204,3 +361,53 @@ void free_initmem(void)
{
free_initmem_default(-1);
}
static void __init parse_memmap_one(char *p)
{
char *oldp;
unsigned long start_at, mem_size;
if (!p)
return;
oldp = p;
mem_size = memparse(p, &p);
if (p == oldp)
return;
switch (*p) {
case '@':
start_at = memparse(p + 1, &p);
add_sysmem_bank(start_at, start_at + mem_size);
break;
case '$':
start_at = memparse(p + 1, &p);
mem_reserve(start_at, start_at + mem_size, 0);
break;
case 0:
mem_reserve(mem_size, 0, 0);
break;
default:
pr_warn("Unrecognized memmap syntax: %s\n", p);
break;
}
}
static int __init parse_memmap_opt(char *str)
{
while (str) {
char *k = strchr(str, ',');
if (k)
*k++ = 0;
parse_memmap_one(str);
str = k;
}
return 0;
}
early_param("memmap", parse_memmap_opt);
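
A few illustrative command lines for the new memmap= option; the forms are inferred from parse_memmap_one() above, and the sizes and addresses are made up:

```c
/*
 * Illustrative memmap= command lines (inferred from parse_memmap_one()):
 *
 *   memmap=96M@0x00000000    add a 96 MB bank at physical address 0
 *   memmap=16M$0x06000000    reserve (remove) 16 MB starting at 0x06000000
 *
 * A bare "memmap=<size>" ends up as mem_reserve(size, 0, 0), i.e. it asks
 * to reserve everything above <size>.
 */
```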
@@ -3,6 +3,7 @@
*
* Extracted from init.c
*/
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/string.h>
@@ -16,9 +17,44 @@
#include <asm/initialize_mmu.h>
#include <asm/io.h>
#if defined(CONFIG_HIGHMEM)
static void * __init init_pmd(unsigned long vaddr)
{
pgd_t *pgd = pgd_offset_k(vaddr);
pmd_t *pmd = pmd_offset(pgd, vaddr);
if (pmd_none(*pmd)) {
unsigned i;
pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE);
for (i = 0; i < 1024; i++)
pte_clear(NULL, 0, pte + i);
set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK));
BUG_ON(pte != pte_offset_kernel(pmd, 0));
pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n",
__func__, vaddr, pmd, pte);
return pte;
} else {
return pte_offset_kernel(pmd, 0);
}
}
static void __init fixedrange_init(void)
{
BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE);
init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK);
}
#endif
void __init paging_init(void)
{
memset(swapper_pg_dir, 0, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
fixedrange_init();
pkmap_page_table = init_pmd(PKMAP_BASE);
kmap_init();
#endif
}
/*
......
@@ -149,6 +149,21 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
local_irq_restore(flags);
}
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
end - start < _TLB_ENTRIES << PAGE_SHIFT) {
start &= PAGE_MASK;
while (start < end) {
invalidate_itlb_mapping(start);
invalidate_dtlb_mapping(start);
start += PAGE_SIZE;
}
} else {
local_flush_tlb_all();
}
}
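
A quick worked example of the size threshold used above; the _TLB_ENTRIES value is an assumption here, since it is core-specific:

```c
/*
 * Threshold example (assuming _TLB_ENTRIES == 64 and 4 KB pages):
 *
 *   _TLB_ENTRIES << PAGE_SHIFT  ==  64 * 4096  ==  256 KB
 *
 * Ranges smaller than that, lying inside [TASK_SIZE, PAGE_OFFSET), are
 * invalidated one page at a time; everything else falls back to
 * local_flush_tlb_all().
 */
```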
#ifdef CONFIG_DEBUG_TLB_SANITY
static unsigned get_pte_for_vaddr(unsigned vaddr)
......
@@ -4,6 +4,7 @@
# "prom monitor" library routines under Linux.
#
obj-y = console.o setup.o
obj-y = setup.o
obj-$(CONFIG_TTY) += console.o
obj-$(CONFIG_NET) += network.o
obj-$(CONFIG_BLK_DEV_SIMDISK) += simdisk.o
@@ -92,18 +92,8 @@ void __init platform_setup(char** cmdline)
/* early initialization */
extern sysmem_info_t __initdata sysmem;
void platform_init(bp_tag_t* first)
void __init platform_init(bp_tag_t *first)
{
/* Set default memory block if not provided by the bootloader. */
if (sysmem.nr_banks == 0) {
sysmem.nr_banks = 1;
sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
+ PLATFORM_DEFAULT_MEM_SIZE;
}
}
/* Heartbeat. Let the LED blink. */
......
@@ -35,6 +35,10 @@
#define EM_ARCOMPACT 93
#endif
#ifndef EM_XTENSA
#define EM_XTENSA 94
#endif
#ifndef EM_AARCH64
#define EM_AARCH64 183
#endif
@@ -281,6 +285,7 @@ do_file(char const *const fname)
case EM_AARCH64:
case EM_MICROBLAZE:
case EM_MIPS:
case EM_XTENSA:
break;
} /* end switch */
......