Commit 30eebb54 authored by Linus Torvalds

Merge branch 'next' of git://git.monstr.eu/linux-2.6-microblaze

Pull arch/microblaze fixes from Michal Simek

* 'next' of git://git.monstr.eu/linux-2.6-microblaze:
  microblaze: Handle TLB skip size dynamically
  microblaze: Introduce TLB skip size
  microblaze: Improve TLB calculation for small systems
  microblaze: Extend space for compiled-in FDT to 32kB
  microblaze: Clear all MSR flags on the first kernel instruction
  microblaze: Use node name instead of compatible string
  microblaze: Fix mapin_ram function
  microblaze: Highmem support
  microblaze: Use active regions
  microblaze: Show more detailed information about memory
  microblaze: Introduce fixmap
  microblaze: mm: Fix lowmem max memory size limits
  microblaze: mm: Use ZONE_DMA instead of ZONE_NORMAL
  microblaze: trivial: Fix typo fault in timer.c
  microblaze: Use vsprintf extention %pf with builtin_return_address
  microblaze: Add PVR version string for MB 8.20.b and 8.30.a
  microblaze: Fix makefile to work with latest toolchain
  microblaze: Fix typo in early_printk.c
config MICROBLAZE
def_bool y
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_FUNCTION_GRAPH_TRACER
@@ -28,6 +29,12 @@ config SWAP
config RWSEM_GENERIC_SPINLOCK
def_bool y
config ZONE_DMA
def_bool y
config ARCH_POPULATES_NODE_MAP
def_bool y
config RWSEM_XCHGADD_ALGORITHM
bool
@@ -153,20 +160,18 @@ config XILINX_UNCACHED_SHADOW
The feature requires the design to define the RAM memory controller
window to be twice as large as the actual physical memory.
config HIGHMEM_START_BOOL
bool "Set high memory pool address"
depends on ADVANCED_OPTIONS && HIGHMEM
config HIGHMEM
bool "High memory support"
depends on MMU
help
This option allows you to set the base address of the kernel virtual
area used to map high memory pages. This can be useful in
optimizing the layout of kernel virtual memory.
The address space of Microblaze processors is only 4 Gigabytes large
and it has to accommodate user address space, kernel address
space as well as some memory mapped IO. That means that, if you
have a large amount of physical memory and/or IO, not all of the
memory can be "permanently mapped" by the kernel. The physical
memory that is not permanently mapped is called "high memory".
Say N here unless you know what you are doing.
config HIGHMEM_START
hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL
depends on MMU
default "0xfe000000"
If unsure, say n.
config LOWMEM_SIZE_BOOL
bool "Set maximum low memory"
@@ -255,6 +260,10 @@ config MICROBLAZE_32K_PAGES
endchoice
config KERNEL_PAD
hex "Kernel PAD for unpacking" if ADVANCED_OPTIONS
default "0x80000" if MMU
endmenu
source "mm/Kconfig"
......
@@ -8,7 +8,7 @@ obj-y += linked_dtb.o
targets := linux.bin linux.bin.gz simpleImage.%
OBJCOPYFLAGS := -O binary
OBJCOPYFLAGS := -R .note -R .comment -R .note.gnu.build-id -O binary
# Ensure system.dtb exists
$(obj)/linked_dtb.o: $(obj)/system.dtb
......
/*
* fixmap.h: compile-time virtual memory allocation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
*
* Copyright 2008 Freescale Semiconductor Inc.
* Port to powerpc added by Kumar Gala
*
* Copyright 2011 Michal Simek <monstr@monstr.eu>
* Copyright 2011 PetaLogix Qld Pty Ltd
* Port to Microblaze
*/
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/page.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
#endif
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the end of virtual memory (0xfffff000) backwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* these 'compile-time allocated' memory buffers are
* fixed-size 4k pages (or larger if used with an increment
* higher than 1). Use set_fixmap(idx, phys) to associate
* physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*/
enum fixed_addresses {
FIX_HOLE,
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * num_possible_cpus()) - 1,
#endif
__end_of_fixed_addresses
};
extern void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags);
#define set_fixmap(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL)
/*
* Some hardware wants to get fixmapped without caching.
*/
#define set_fixmap_nocache(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL_CI)
#define clear_fixmap(idx) \
__set_fixmap(idx, 0, __pgprot(0))
#define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
extern void __this_fixmap_does_not_exist(void);
/*
* 'index to address' translation. If anyone tries to use the idx
* directly without translation, we catch the bug with a NULL-dereference
* kernel oops. Illegal ranges of incoming indices are caught too.
*/
static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
/*
* this branch gets completely eliminated after inlining,
* except when someone tries to use fixaddr indices in an
* illegal way. (such as mixing up address types or using
* out-of-range indices).
*
* If it doesn't get removed, the linker will complain
* loudly with a reasonably clear error message.
*/
if (idx >= __end_of_fixed_addresses)
__this_fixmap_does_not_exist();
return __fix_to_virt(idx);
}
static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
return __virt_to_fix(vaddr);
}
#endif /* !__ASSEMBLY__ */
#endif
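A hedged usage sketch of the API above (editor's illustration, not part of the patch): mapping a device page at a compile-time-constant virtual address. FIX_EARLY_UART is a hypothetical index that would first have to be added to enum fixed_addresses.

#include <linux/types.h>
#include <asm/fixmap.h>

/* FIX_EARLY_UART is hypothetical - it is not defined by this patch. */
static void __iomem *early_uart_base(phys_addr_t phys)
{
	set_fixmap_nocache(FIX_EARLY_UART, phys);	/* uncached, for MMIO */
	return (void __iomem *)fix_to_virt(FIX_EARLY_UART);
}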
/*
* highmem.h: virtual kernel memory mappings for high memory
*
* Used in CONFIG_HIGHMEM systems for memory pages which
* are not addressable by direct kernel virtual addresses.
*
* Copyright (C) 1999 Gerhard Wichert, Siemens AG
* Gerhard.Wichert@pdb.siemens.de
*
*
* Redesigned the x86 32-bit VM architecture to deal with
* up to 16 Terabyte physical memory. With current x86 CPUs
* we now support up to 64 Gigabytes physical RAM.
*
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*/
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <asm/fixmap.h>
extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
/*
* Right now we initialize only a single pte table. It can be extended
* easily, subsequent pte tables have to be allocated in one physical
* chunk of RAM.
*/
/*
* We use one full pte table with 4K pages. And with 16K/64K/256K pages pte
* table covers enough memory (32MB/512MB/2GB resp.), so that both FIXMAP
* and PKMAP can be placed in a single pte table. We use 512 pages for PKMAP
* in case of 16K/64K/256K page sizes.
*/
#define PKMAP_ORDER PTE_SHIFT
#define LAST_PKMAP (1 << PKMAP_ORDER)
#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
& PMD_MASK)
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
#define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
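A worked example of the resulting layout (editor's note; 4 KB pages and PTE_SHIFT == 10 are assumptions, not taken from the patch):

/*
 * PKMAP_ORDER = 10, so LAST_PKMAP = 1024 slots.
 * The pkmap window is 1024 * 4 KB = 4 MB, starting LAST_PKMAP + 1 pages
 * below FIXADDR_START (one guard page) and rounded down to PMD_MASK.
 * PKMAP_NR(PKMAP_BASE + 7 * PAGE_SIZE) == 7 recovers a slot index.
 */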
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
extern void __kunmap_atomic(void *kvaddr);
static inline void *kmap(struct page *page)
{
might_sleep();
if (!PageHighMem(page))
return page_address(page);
return kmap_high(page);
}
static inline void kunmap(struct page *page)
{
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return;
kunmap_high(page);
}
static inline void *__kmap_atomic(struct page *page)
{
return kmap_atomic_prot(page, kmap_prot);
}
static inline struct page *kmap_atomic_to_page(void *ptr)
{
unsigned long idx, vaddr = (unsigned long) ptr;
pte_t *pte;
if (vaddr < FIXADDR_START)
return virt_to_page(ptr);
idx = virt_to_fix(vaddr);
pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
return pte_page(*pte);
}
#define flush_cache_kmaps() { flush_icache(); flush_dcache(); }
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */
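A short usage sketch of the kmap()/kunmap() pair declared above (editor's illustration; copy_high_page() is a hypothetical helper):

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy one possibly-highmem page through temporary kernel mappings. */
static void copy_high_page(struct page *dst, struct page *src)
{
	void *vdst = kmap(dst);	/* may sleep; lowmem pages map directly */
	void *vsrc = kmap(src);

	memcpy(vdst, vsrc, PAGE_SIZE);
	kunmap(src);
	kunmap(dst);
}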
@@ -56,6 +56,12 @@ typedef struct _SEGREG {
extern void _tlbie(unsigned long va); /* invalidate a TLB entry */
extern void _tlbia(void); /* invalidate all TLB entries */
/*
* tlb_skip stores the number of TLB entries, counted from TLB0, that are
* skipped (pinned); every direct TLB mapping has to increase tlb_skip.
*/
extern u32 tlb_skip;
# endif /* __ASSEMBLY__ */
/*
@@ -69,6 +75,12 @@ extern void _tlbia(void); /* invalidate all TLB entries */
# define MICROBLAZE_TLB_SIZE 64
/* For cases when you want to skip some TLB entries */
# define MICROBLAZE_TLB_SKIP 0
/* Use the last TLB for temporary access to LMB */
# define MICROBLAZE_LMB_TLB_ID 63
/*
* TLB entries are defined by a "high" tag portion and a "low" data
* portion. The data portion is 32-bits.
......
@@ -135,8 +135,8 @@ extern unsigned long min_low_pfn;
extern unsigned long max_pfn;
extern unsigned long memory_start;
extern unsigned long memory_end;
extern unsigned long memory_size;
extern unsigned long lowmem_size;
extern int page_is_ram(unsigned long pfn);
......
@@ -94,8 +94,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/* Start and end of the vmalloc area. */
/* Make sure to map the vmalloc area above the pinned kernel memory area
of 32Mb. */
#define VMALLOC_START (CONFIG_KERNEL_START + \
max(32 * 1024 * 1024UL, memory_size))
#define VMALLOC_START (CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
#define VMALLOC_END ioremap_bot
#endif /* __ASSEMBLY__ */
......
@@ -39,7 +39,8 @@ extern void of_platform_reset_gpio_probe(void);
void time_init(void);
void init_IRQ(void);
void machine_early_init(const char *cmdline, unsigned int ram,
unsigned int fdt, unsigned int msr);
unsigned int fdt, unsigned int msr, unsigned int tlb0,
unsigned int tlb1);
void machine_restart(char *cmd);
void machine_shutdown(void);
......
@@ -83,6 +83,7 @@ void default_idle(void);
void free_init_pages(char *what, unsigned long begin, unsigned long end);
void free_initmem(void);
extern char *klimit;
extern unsigned long kernel_tlb;
extern void ret_from_fork(void);
extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
......
@@ -80,7 +80,7 @@ extern unsigned long search_exception_table(unsigned long);
static inline int ___range_ok(unsigned long addr, unsigned long size)
{
return ((addr < memory_start) ||
((addr + size) > memory_end));
((addr + size - 1) > (memory_start + memory_size - 1)));
}
#define __range_ok(addr, size) \
......
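The rewritten ___range_ok() compares the last byte of the access against the last byte of RAM. That form stays representable even when RAM ends exactly at the 4 GB boundary, where the old memory_end would wrap to 0. A sketch with hypothetical 32-bit values:

/*
 * memory_start = 0xC0000000, memory_size = 0x40000000 (RAM reaching 4 GB):
 *   old bound: memory_end = 0xC0000000 + 0x40000000 wraps to 0x00000000,
 *              so "(addr + size) > memory_end" flags nearly every access
 *              as out of range;
 *   new bound: memory_start + memory_size - 1 = 0xFFFFFFFF, so an access
 *              at addr = 0xF0000000, size = 0x1000 checks
 *              0xF0000FFF <= 0xFFFFFFFF and passes, as intended.
 */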
@@ -35,6 +35,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
{"8.00.b", 0x13},
{"8.10.a", 0x14},
{"8.20.a", 0x15},
{"8.20.b", 0x16},
{"8.30.a", 0x17},
{NULL, 0},
};
......
@@ -171,10 +171,24 @@ void __init remap_early_printk(void)
{
if (!early_console_initialized || !early_console)
return;
printk(KERN_INFO "early_printk_console remaping from 0x%x to ",
printk(KERN_INFO "early_printk_console remapping from 0x%x to ",
base_addr);
base_addr = (u32) ioremap(base_addr, PAGE_SIZE);
printk(KERN_CONT "0x%x\n", base_addr);
/*
* The early console occupies the top of the skipped TLB entries;
* decrease tlb_skip so that the hardcoded TLB entry can be
* reused by the generic replacement algorithm
* FIXME check if early console mapping is on the top by rereading
* TLB entry and compare baseaddr
* mts rtlbx, (tlb_skip - 1)
* nop
* mfs rX, rtlblo
* nop
* cmp rX, orig_base_addr
*/
tlb_skip -= 1;
}
void __init disable_early_printk(void)
......
@@ -63,9 +63,7 @@ ENTRY(_start)
real_start:
#endif
mfs r1, rmsr
andi r1, r1, ~2
mts rmsr, r1
mts rmsr, r0
/*
* According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
* if the msrclr instruction is not enabled. We use this to detect
@@ -73,6 +71,7 @@ real_start:
* r8 == 0 - msr instructions are implemented
* r8 != 0 - msr instructions are not implemented
*/
mfs r1, rmsr
msrclr r8, 0 /* clear nothing - just read msr for test */
cmpu r8, r8, r1 /* r1 must contain msr reg content */
@@ -96,7 +95,7 @@ big_endian:
_prepare_copy_fdt:
or r11, r0, r0 /* increment */
ori r4, r0, TOPHYS(_fdt_start)
ori r3, r0, (0x4000 - 4)
ori r3, r0, (0x8000 - 4)
_copy_fdt:
lw r12, r7, r11 /* r12 = r7 + r11 */
sw r12, r4, r11 /* addr[r4 + r11] = r12 */
@@ -150,6 +149,7 @@ _copy_bram:
_invalidate:
mts rtlbx, r3
mts rtlbhi, r0 /* flush: ensure V is clear */
mts rtlblo, r0
bgtid r3, _invalidate /* loop for all entries */
addik r3, r3, -1
/* sync */
@@ -169,6 +169,53 @@ _invalidate:
addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
tophys(r4,r3) /* Load the kernel physical address */
/* start to do TLB calculation */
addik r12, r0, _end
rsub r12, r3, r12
addik r12, r12, CONFIG_KERNEL_PAD /* that's the pad */
or r9, r0, r0 /* TLB0 = 0 */
or r10, r0, r0 /* TLB1 = 0 */
addik r11, r12, -0x1000000
bgei r11, GT16 /* size is greater than 16MB */
addik r11, r12, -0x0800000
bgei r11, GT8 /* size is greater than 8MB */
addik r11, r12, -0x0400000
bgei r11, GT4 /* size is greater than 4MB */
/* size is less than 4MB */
addik r11, r12, -0x0200000
bgei r11, GT2 /* size is greater than 2MB */
addik r9, r0, 0x0100000 /* TLB0 must be 1MB */
addik r11, r12, -0x0100000
bgei r11, GT1 /* size is greater than 1MB */
/* TLB1 is 0 which is setup above */
bri tlb_end
GT4: /* r11 contains the rest - will be either 1 or 4 */
ori r9, r0, 0x400000 /* TLB0 is 4MB */
bri TLB1
GT16: /* TLB0 is 16MB */
addik r9, r0, 0x1000000 /* means TLB0 is 16MB */
TLB1:
/* r2 must be used here so that r11 is preserved if the subtraction goes negative */
addik r2, r11, -0x0400000
bgei r2, GT20 /* size is greater than 20MB */
/* size is >16MB and <20MB */
addik r11, r11, -0x0100000
bgei r11, GT17 /* size is greater than 17MB */
/* kernel is >16MB and < 17MB */
GT1:
addik r10, r0, 0x0100000 /* means TLB1 is 1MB */
bri tlb_end
GT2: /* TLB0 is 0 and TLB1 will be 4MB */
GT17: /* TLB1 is 4MB - kernel size <20MB */
addik r10, r0, 0x0400000 /* means TLB1 is 4MB */
bri tlb_end
GT8: /* TLB0 is still zero, so only TLB1 is used */
GT20: /* TLB1 is 16MB - kernel size >20MB */
addik r10, r0, 0x1000000 /* means TLB1 is 16MB */
tlb_end:
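Read as C, the branch tree above picks at most two TLB-backed regions of 1, 4 or 16 MB for the padded kernel image (editor's sketch, not part of the patch; the function name is invented):

static void kernel_tlb_sizes(unsigned long size,
			     unsigned long *tlb0, unsigned long *tlb1)
{
	const unsigned long MB = 0x100000;
	unsigned long rem;

	*tlb0 = *tlb1 = 0;
	if (size >= 16 * MB) {			/* GT16 */
		*tlb0 = 16 * MB;
		rem = size - 16 * MB;
		*tlb1 = rem >= 4 * MB ? 16 * MB :	/* GT20 */
			rem >= MB ? 4 * MB :		/* GT17 */
			MB;				/* GT1 */
	} else if (size >= 8 * MB) {		/* GT8: one 16 MB entry */
		*tlb1 = 16 * MB;
	} else if (size >= 4 * MB) {		/* GT4 */
		*tlb0 = 4 * MB;
		rem = size - 4 * MB;
		*tlb1 = rem >= MB ? 4 * MB : MB;
	} else if (size >= 2 * MB) {		/* GT2: one 4 MB entry */
		*tlb1 = 4 * MB;
	} else {
		*tlb0 = MB;
		if (size >= MB)			/* GT1 */
			*tlb1 = MB;
	}
}

The code below then moves the TLB1 size into TLB0 whenever TLB0 came out as zero, so the first slot is always programmed.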
/*
* Configure and load two entries into TLB slots 0 and 1.
* In case we are pinning TLBs, these are reserved in by the
@@ -178,28 +225,81 @@ _invalidate:
andi r4,r4,0xfffffc00 /* Mask off the real page number */
ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */
/*
* TLB0 is always used - check whether it is non-zero (r9 stores the TLB0 size);
* if it is zero, use the TLB1 value instead and clear TLB1 (r10 stores the TLB1 size)
*/
bnei r9, tlb0_not_zero
add r9, r10, r0
add r10, r0, r0
tlb0_not_zero:
/* look at the code below */
ori r30, r0, 0x200
andi r29, r9, 0x100000
bneid r29, 1f
addik r30, r30, 0x80
andi r29, r9, 0x400000
bneid r29, 1f
addik r30, r30, 0x80
andi r29, r9, 0x1000000
bneid r29, 1f
addik r30, r30, 0x80
1:
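/*
 * Editor's note (assuming the TLB_PAGESZ() encoding from asm/mmu.h,
 * where 1 MB = 5, 4 MB = 6, 16 MB = 7, shifted left by 7): r30 now
 * holds the TLBHI SIZE field - 0x280 for 1 MB, 0x300 for 4 MB, 0x380
 * for 16 MB. Each addik sits in a bneid delay slot and so executes
 * even when the branch is taken; the number of tests passed through
 * therefore selects the size.
 */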
andi r3,r3,0xfffffc00 /* Mask off the effective page number */
ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))
ori r3,r3,(TLB_VALID)
or r3, r3, r30
mts rtlbx,r0 /* TLB slot 0 */
/* Load tlb_skip, the index of the first unused TLB entry */
lwi r11, r0, TOPHYS(tlb_skip)
mts rtlbx,r11 /* TLB slot 0 */
mts rtlblo,r4 /* Load the data portion of the entry */
mts rtlbhi,r3 /* Load the tag portion of the entry */
addik r4, r4, 0x01000000 /* Map next 16 M entries */
addik r3, r3, 0x01000000
/* Increase tlb_skip size */
addik r11, r11, 1
swi r11, r0, TOPHYS(tlb_skip)
/* TLB1 can be zero, in which case we skip setting it up */
beqi r10, jump_over2
/* look at the code below */
ori r30, r0, 0x200
andi r29, r10, 0x100000
bneid r29, 1f
addik r30, r30, 0x80
andi r29, r10, 0x400000
bneid r29, 1f
addik r30, r30, 0x80
andi r29, r10, 0x1000000
bneid r29, 1f
addik r30, r30, 0x80
1:
addk r4, r4, r9 /* previous addr + TLB0 size */
addk r3, r3, r9
ori r6,r0,1 /* TLB slot 1 */
mts rtlbx,r6
andi r3,r3,0xfffffc00 /* Mask off the effective page number */
ori r3,r3,(TLB_VALID)
or r3, r3, r30
lwi r11, r0, TOPHYS(tlb_skip)
mts rtlbx, r11 /* r11 is used from TLB0 */
mts rtlblo,r4 /* Load the data portion of the entry */
mts rtlbhi,r3 /* Load the tag portion of the entry */
/* Increase tlb_skip size */
addik r11, r11, 1
swi r11, r0, TOPHYS(tlb_skip)
jump_over2:
/*
* Load a TLB entry for LMB, since we need access to
* the exception vectors, using a 4k real==virtual mapping.
*/
ori r6,r0,3 /* TLB slot 3 */
/* Use temporary TLB_ID for LMB - clear this temporary mapping later */
ori r6, r0, MICROBLAZE_LMB_TLB_ID
mts rtlbx,r6
ori r4,r0,(TLB_WR | TLB_EX)
@@ -238,8 +338,8 @@ start_here:
* Please see $(ARCH)/mach-$(SUBARCH)/setup.c for
* the function.
*/
addik r9, r0, machine_early_init
brald r15, r9
addik r11, r0, machine_early_init
brald r15, r11
nop
#ifndef CONFIG_MMU
@@ -268,8 +368,7 @@ start_here:
/* Load up the kernel context */
kernel_load_context:
# Keep entries 0 and 1 valid. The temporary LMB mapping can go away.
ori r5,r0,3
ori r5, r0, MICROBLAZE_LMB_TLB_ID
mts rtlbx,r5
nop
mts rtlbhi,r0
......
@@ -820,19 +820,26 @@ ex_handler_done:
* Upon exit, we reload everything and RFI.
* A common place to load the TLB.
*/
.section .data
.align 4
.global tlb_skip
tlb_skip:
.long MICROBLAZE_TLB_SKIP
tlb_index:
.long 1 /* MS: storing last used tlb index */
/* MS: storing last used tlb index */
.long MICROBLAZE_TLB_SIZE/2
.previous
finish_tlb_load:
/* MS: load the last used TLB index. */
lwi r5, r0, TOPHYS(tlb_index)
addik r5, r5, 1 /* MS: inc tlb_index -> use next one */
/* MS: FIXME this is a potential fault, because this is a mask, not a count */
andi r5, r5, (MICROBLAZE_TLB_SIZE-1)
andi r5, r5, MICROBLAZE_TLB_SIZE - 1
ori r6, r0, 1
cmp r31, r5, r6
blti r31, ex12
addik r5, r6, 1
lwi r5, r0, TOPHYS(tlb_skip)
ex12:
/* MS: save back current TLB index */
swi r5, r0, TOPHYS(tlb_index)
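/*
 * Editor's sketch of the victim selection above (delay slots ignored,
 * assuming "cmp r31, r5, r6" yields the signed result of r6 - r5):
 *
 *	next = (tlb_index + 1) & (MICROBLAZE_TLB_SIZE - 1);
 *	if (next <= 1)           - wrapped around the 64-entry TLB -
 *		next = tlb_skip; - restart above the pinned entries -
 *	tlb_index = next;
 *
 * tlb_index now starts at MICROBLAZE_TLB_SIZE/2, so replacements begin
 * in the upper half of the TLB; after the first wrap every entry from
 * tlb_skip upwards is recycled.
 */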
......
@@ -151,8 +151,8 @@ void __init init_IRQ(void)
#ifdef CONFIG_SELFMOD_INTC
selfmod_function((int *) arr_func, intc_baseaddr);
#endif
printk(KERN_INFO "XPS intc #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
intc_baseaddr, nr_irq, intr_mask);
printk(KERN_INFO "%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
intc->name, intc_baseaddr, nr_irq, intr_mask);
/*
* Disable all external interrupts until they are
......
@@ -29,16 +29,16 @@
.type _tlbia, @function
.align 4;
_tlbia:
addik r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */
lwi r12, r0, tlb_skip;
/* isync */
_tlbia_1:
mts rtlbx, r12
nop
mts rtlbhi, r0 /* flush: ensure V is clear */
nop
addik r11, r12, -2
rsubi r11, r12, MICROBLAZE_TLB_SIZE - 1
bneid r11, _tlbia_1 /* loop for all entries */
addik r12, r12, -1
addik r12, r12, 1
/* sync */
rtsd r15, 8
nop
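/*
 * Equivalent C for the loop above (editor's sketch): invalidate every
 * entry except those pinned below tlb_skip:
 *
 *	for (i = tlb_skip; i <= MICROBLAZE_TLB_SIZE - 1; i++)
 *		invalidate entry i (mts rtlbx, i; mts rtlbhi, r0)
 */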
@@ -75,7 +75,7 @@ early_console_reg_tlb_alloc:
* Load a TLB entry for the UART, so that microblaze_progress() can use
* the UARTs nice and early. We use a 4k real==virtual mapping.
*/
ori r4, r0, MICROBLAZE_TLB_SIZE - 1
lwi r4, r0, tlb_skip
mts rtlbx, r4 /* first unused TLB slot */
or r4,r5,r0
@@ -89,6 +89,11 @@ early_console_reg_tlb_alloc:
nop
mts rtlbhi,r5 /* Load the tag portion of the entry */
nop
lwi r5, r0, tlb_skip
addik r5, r5, 1
swi r5, r0, tlb_skip
rtsd r15, 8
nop
......
@@ -95,8 +95,11 @@ inline unsigned get_romfs_len(unsigned *addr)
}
#endif /* CONFIG_MTD_UCLINUX_EBSS */
unsigned long kernel_tlb;
void __init machine_early_init(const char *cmdline, unsigned int ram,
unsigned int fdt, unsigned int msr)
unsigned int fdt, unsigned int msr, unsigned int tlb0,
unsigned int tlb1)
{
unsigned long *src, *dst;
unsigned int offset = 0;
@@ -143,6 +146,12 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
setup_early_printk(NULL);
#endif
/* Set up kernel_tlb after the BSS has been cleared.
* Maybe worth moving this to asm code */
kernel_tlb = tlb0 + tlb1;
/* printk("TLB1 0x%08x, TLB0 0x%08x, tlb 0x%x\n", tlb0,
tlb1, kernel_tlb); */
printk("Ramdisk addr 0x%08x, ", ram);
if (fdt)
printk("FDT at 0x%08x\n", fdt);
@@ -197,6 +206,19 @@ static int microblaze_debugfs_init(void)
return of_debugfs_root == NULL;
}
arch_initcall(microblaze_debugfs_init);
static int __init debugfs_tlb(void)
{
struct dentry *d;
if (!of_debugfs_root)
return -ENODEV;
d = debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root, &tlb_skip);
if (!d)
return -ENOMEM;
return 0;
}
device_initcall(debugfs_tlb);
#endif
static int dflt_bus_notify(struct notifier_block *nb,
......
@@ -79,7 +79,7 @@ static inline void microblaze_timer0_start_periodic(unsigned long load_val)
* !PWMA - disable pwm
* TINT - clear interrupt status
* ENT- enable timer itself
* EINT - enable interrupt
* ENIT - enable interrupt
* !LOAD - clear the bit to let go
* ARHT - auto reload
* !CAPT - no external trigger
@@ -274,8 +274,8 @@ void __init time_init(void)
#ifdef CONFIG_SELFMOD_TIMER
selfmod_function((int *) arr_func, timer_baseaddr);
#endif
printk(KERN_INFO "XPS timer #0 at 0x%08x, irq=%d\n",
timer_baseaddr, irq);
printk(KERN_INFO "%s #0 at 0x%08x, irq=%d\n",
timer->name, timer_baseaddr, irq);
/* If there is a clock-frequency property then use it */
prop = of_get_property(timer, "clock-frequency", NULL);
......
@@ -44,7 +44,7 @@ SECTIONS {
__fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET) {
_fdt_start = . ; /* place for fdt blob */
*(__fdt_blob) ; /* Any link-placed DTB */
. = _fdt_start + 0x4000; /* Pad up to 16kbyte */
. = _fdt_start + 0x8000; /* Pad up to 32kbyte */
_fdt_end = . ;
}
......
@@ -5,3 +5,4 @@
obj-y := consistent.o init.o
obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
obj-$(CONFIG_HIGHMEM) += highmem.o
/*
* highmem.c: virtual kernel memory mappings for high memory
*
* PowerPC version, stolen from the i386 version.
*
* Used in CONFIG_HIGHMEM systems for memory pages which
* are not addressable by direct kernel virtual addresses.
*
* Copyright (C) 1999 Gerhard Wichert, Siemens AG
* Gerhard.Wichert@pdb.siemens.de
*
*
* Redesigned the x86 32-bit VM architecture to deal with
* up to 16 Terabyte physical memory. With current x86 CPUs
* we now support up to 64 Gigabytes physical RAM.
*
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*
* Reworked for PowerPC by various contributors. Moved from
* highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
*/
#include <linux/highmem.h>
#include <linux/module.h>
/*
* The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
* gives a more generic (and caching) interface. But kmap_atomic can
* be used in IRQ contexts, so in some (very limited) cases we need
* it.
*/
#include <asm/tlbflush.h>
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
unsigned long vaddr;
int idx, type;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
local_flush_tlb_page(NULL, vaddr);
return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);
void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int type;
if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
pagefault_enable();
return;
}
type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
{
unsigned int idx;
idx = type + KM_TYPE_NR * smp_processor_id();
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
/*
* force other mappings to Oops if they try to access
* this pte without first remapping it
*/
pte_clear(&init_mm, vaddr, kmap_pte-idx);
local_flush_tlb_page(NULL, vaddr);
}
#endif
kmap_atomic_idx_pop();
pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
@@ -24,6 +24,7 @@
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
/* Use for MMU and noMMU because of PCI generic code */
int mem_init_done;
@@ -44,9 +45,56 @@ char *klimit = _end;
*/
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_end; /* due to mm/nommu.c */
unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
vaddr), vaddr);
}
static void __init highmem_init(void)
{
pr_debug("%x\n", (u32)PKMAP_BASE);
map_page(PKMAP_BASE, 0, 0); /* XXX gross */
pkmap_page_table = virt_to_kpte(PKMAP_BASE);
kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
kmap_prot = PAGE_KERNEL;
}
static unsigned long highmem_setup(void)
{
unsigned long pfn;
unsigned long reservedpages = 0;
for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
struct page *page = pfn_to_page(pfn);
/* FIXME not sure about */
if (memblock_is_reserved(pfn << PAGE_SHIFT))
continue;
ClearPageReserved(page);
init_page_count(page);
__free_page(page);
totalhigh_pages++;
reservedpages++;
}
totalram_pages += totalhigh_pages;
printk(KERN_INFO "High memory: %luk\n",
totalhigh_pages << (PAGE_SHIFT-10));
return reservedpages;
}
#endif /* CONFIG_HIGHMEM */
/*
* paging_init() sets up the page tables - in fact we've already done this.
@@ -54,17 +102,28 @@ EXPORT_SYMBOL(memory_size);
static void __init paging_init(void)
{
unsigned long zones_size[MAX_NR_ZONES];
#ifdef CONFIG_MMU
int idx;
/* Setup fixmaps */
for (idx = 0; idx < __end_of_fixed_addresses; idx++)
clear_fixmap(idx);
#endif
/* Clean every zones */
memset(zones_size, 0, sizeof(zones_size));
/*
* old: we can DMA to/from any address. Put all pages into ZONE_DMA.
* We use only ZONE_NORMAL
*/
zones_size[ZONE_NORMAL] = max_mapnr;
#ifdef CONFIG_HIGHMEM
highmem_init();
free_area_init(zones_size);
zones_size[ZONE_DMA] = max_low_pfn;
zones_size[ZONE_HIGHMEM] = max_pfn;
#else
zones_size[ZONE_DMA] = max_pfn;
#endif
/* We don't have holes in memory map */
free_area_init_nodes(zones_size);
}
void __init setup_memory(void)
@@ -78,32 +137,31 @@ void __init setup_memory(void)
/* Find main memory where is the kernel */
for_each_memblock(memory, reg) {
memory_start = (u32)reg->base;
memory_end = (u32) reg->base + reg->size;
lowmem_size = reg->size;
if ((memory_start <= (u32)_text) &&
((u32)_text <= memory_end)) {
memory_size = memory_end - memory_start;
((u32)_text <= (memory_start + lowmem_size - 1))) {
memory_size = lowmem_size;
PAGE_OFFSET = memory_start;
printk(KERN_INFO "%s: Main mem: 0x%x-0x%x, "
printk(KERN_INFO "%s: Main mem: 0x%x, "
"size 0x%08x\n", __func__, (u32) memory_start,
(u32) memory_end, (u32) memory_size);
(u32) memory_size);
break;
}
}
if (!memory_start || !memory_end) {
panic("%s: Missing memory setting 0x%08x-0x%08x\n",
__func__, (u32) memory_start, (u32) memory_end);
if (!memory_start || !memory_size) {
panic("%s: Missing memory setting 0x%08x, size=0x%08x\n",
__func__, (u32) memory_start, (u32) memory_size);
}
/* reservation of region where is the kernel */
kernel_align_start = PAGE_DOWN((u32)_text);
/* ALIGN can be removed because _end in vmlinux.lds.S is aligned */
kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start;
memblock_reserve(kernel_align_start, kernel_align_size);
printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n",
printk(KERN_INFO "%s: kernel addr:0x%08x-0x%08x size=0x%08x\n",
__func__, kernel_align_start, kernel_align_start
+ kernel_align_size, kernel_align_size);
memblock_reserve(kernel_align_start, kernel_align_size);
#endif
/*
* Kernel:
@@ -120,11 +178,13 @@ void __init setup_memory(void)
min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */
/* RAM is assumed contiguous */
num_physpages = max_mapnr = memory_size >> PAGE_SHIFT;
max_pfn = max_low_pfn = memory_end >> PAGE_SHIFT;
max_low_pfn = ((u64)memory_start + (u64)lowmem_size) >> PAGE_SHIFT;
max_pfn = ((u64)memory_start + (u64)memory_size) >> PAGE_SHIFT;
printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr);
printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn);
printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
printk(KERN_INFO "%s: max_pfn: %#lx\n", __func__, max_pfn);
/*
* Find an area to use for the bootmem bitmap.
@@ -137,15 +197,39 @@ void __init setup_memory(void)
PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
/* Add active regions with valid PFNs */
for_each_memblock(memory, reg) {
unsigned long start_pfn, end_pfn;
start_pfn = memblock_region_memory_base_pfn(reg);
end_pfn = memblock_region_memory_end_pfn(reg);
memblock_set_node(start_pfn << PAGE_SHIFT,
(end_pfn - start_pfn) << PAGE_SHIFT, 0);
}
/* free bootmem is whole main memory */
free_bootmem(memory_start, memory_size);
free_bootmem_with_active_regions(0, max_low_pfn);
/* reserve allocate blocks */
for_each_memblock(reserved, reg) {
pr_debug("reserved - 0x%08x-0x%08x\n",
(u32) reg->base, (u32) reg->size);
reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
unsigned long top = reg->base + reg->size - 1;
pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
(u32) reg->base, (u32) reg->size, top,
memory_start + lowmem_size - 1);
if (top <= (memory_start + lowmem_size - 1)) {
reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
} else if (reg->base < (memory_start + lowmem_size - 1)) {
unsigned long trunc_size = memory_start + lowmem_size -
reg->base;
reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
}
}
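/*
 * Worked example of the clipping above (editor's note, hypothetical
 * numbers):
 *   memory_start = 0x80000000, lowmem_size = 0x30000000
 *   -> last lowmem byte = 0xAFFFFFFF
 *   reserved region: base = 0xAFFF0000, size = 0x20000 -> top = 0xB000FFFF
 *   The top crosses lowmem, so only trunc_size = 0x80000000 + 0x30000000
 *   - 0xAFFF0000 = 0x10000 bytes go to reserve_bootmem(), since bootmem
 *   only manages lowmem.
 */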
/* XXX need to clip this if using highmem? */
sparse_memory_present_with_active_regions(0);
#ifdef CONFIG_MMU
init_bootmem_done = 1;
#endif
@@ -190,13 +274,58 @@ void free_initmem(void)
void __init mem_init(void)
{
high_memory = (void *)__va(memory_end);
pg_data_t *pgdat;
unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
high_memory = (void *)__va(memory_start + lowmem_size - 1);
/* this will put all memory onto the freelists */
totalram_pages += free_all_bootmem();
printk(KERN_INFO "Memory: %luk/%luk available\n",
nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10));
for_each_online_pgdat(pgdat) {
unsigned long i;
struct page *page;
for (i = 0; i < pgdat->node_spanned_pages; i++) {
if (!pfn_valid(pgdat->node_start_pfn + i))
continue;
page = pgdat_page_nr(pgdat, i);
if (PageReserved(page))
reservedpages++;
}
}
#ifdef CONFIG_HIGHMEM
reservedpages -= highmem_setup();
#endif
codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
pr_info("Memory: %luk/%luk available (%luk kernel code, "
"%luk reserved, %luk data, %luk bss, %luk init)\n",
nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
bsssize >> 10,
initsize >> 10);
#ifdef CONFIG_MMU
pr_info("Kernel virtual memory layout:\n");
pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
ioremap_bot, ioremap_base);
pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
(unsigned long)VMALLOC_START, VMALLOC_END);
#endif
mem_init_done = 1;
}
@@ -226,7 +355,6 @@ static void mm_cmdline_setup(void)
maxmem = memparse(p, &p);
if (maxmem && memory_size > maxmem) {
memory_size = maxmem;
memory_end = memory_start + memory_size;
memblock.memory.regions[0].size = memory_size;
}
}
@@ -270,15 +398,26 @@ asmlinkage void __init mmu_init(void)
machine_restart(NULL);
}
if ((u32) memblock.memory.regions[0].size < 0x1000000) {
printk(KERN_EMERG "Memory must be greater than 16MB\n");
if ((u32) memblock.memory.regions[0].size < 0x400000) {
printk(KERN_EMERG "Memory must be greater than 4MB\n");
machine_restart(NULL);
}
if ((u32) memblock.memory.regions[0].size < kernel_tlb) {
printk(KERN_EMERG "Kernel size is greater than memory node\n");
machine_restart(NULL);
}
/* Find main memory where the kernel is */
memory_start = (u32) memblock.memory.regions[0].base;
memory_end = (u32) memblock.memory.regions[0].base +
(u32) memblock.memory.regions[0].size;
memory_size = memory_end - memory_start;
lowmem_size = memory_size = (u32) memblock.memory.regions[0].size;
if (lowmem_size > CONFIG_LOWMEM_SIZE) {
lowmem_size = CONFIG_LOWMEM_SIZE;
#ifndef CONFIG_HIGHMEM
memory_size = lowmem_size;
#endif
}
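A worked example of the clamp above (editor's note; the default CONFIG_LOWMEM_SIZE of 0x30000000, i.e. 768 MB, and the 1 GB memory node are assumptions):

/*
 * memblock region: base 0x80000000, size 0x40000000 (1 GB)
 * lowmem_size = memory_size = 0x40000000 initially, then clamped:
 *   lowmem_size = 0x30000000 (768 MB)
 * with CONFIG_HIGHMEM:    memory_size stays 0x40000000 (256 MB highmem)
 * without CONFIG_HIGHMEM: memory_size = 0x30000000 (top 256 MB unused)
 */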
mm_cmdline_setup(); /* FIXME parse args from command line - not used */
@@ -305,15 +444,20 @@ asmlinkage void __init mmu_init(void)
/* Map in all of RAM starting at CONFIG_KERNEL_START */
mapin_ram();
#ifdef CONFIG_HIGHMEM_START_BOOL
ioremap_base = CONFIG_HIGHMEM_START;
/* Extend vmalloc and ioremap area as big as possible */
#ifdef CONFIG_HIGHMEM
ioremap_base = ioremap_bot = PKMAP_BASE;
#else
ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
#endif /* CONFIG_HIGHMEM_START_BOOL */
ioremap_bot = ioremap_base;
ioremap_base = ioremap_bot = FIXADDR_START;
#endif
/* Initialize the context management stuff */
mmu_context_init();
/* Shortly after that, the entire linear mapping will be available */
/* This also causes the unflattened device tree to be allocated
* inside the 768MB lowmem limit */
memblock_set_current_limit(memory_start + lowmem_size - 1);
}
/* This is only called until mem_init is done. */
@@ -324,11 +468,11 @@ void __init *early_get_page(void)
p = alloc_bootmem_pages(PAGE_SIZE);
} else {
/*
* Mem start + 32MB -> here is limit
* Mem start + kernel_tlb -> here is limit
* because of mem mapping from head.S
*/
p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
memory_start + 0x2000000));
memory_start + kernel_tlb));
}
return p;
}
......
@@ -37,6 +37,7 @@
#include <linux/io.h>
#include <asm/mmu.h>
#include <asm/sections.h>
#include <asm/fixmap.h>
#define flush_HPTE(X, va, pg) _tlbie(va)
@@ -44,11 +45,6 @@ unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);
/* The maximum lowmem defaults to 768Mb, but this can be configured to
* another value.
*/
#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif
@@ -80,7 +76,7 @@ static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
!(p >= virt_to_phys((unsigned long)&__bss_stop) &&
p < virt_to_phys((unsigned long)__bss_stop))) {
printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
" is RAM lr %p\n", (unsigned long)p,
" is RAM lr %pf\n", (unsigned long)p,
__builtin_return_address(0));
return NULL;
}
@@ -171,7 +167,7 @@ void __init mapin_ram(void)
v = CONFIG_KERNEL_START;
p = memory_start;
for (s = 0; s < memory_size; s += PAGE_SIZE) {
for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
f = _PAGE_PRESENT | _PAGE_ACCESSED |
_PAGE_SHARED | _PAGE_HWEXEC;
if ((char *) v < _stext || (char *) v >= _etext)
@@ -254,3 +250,13 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
}
return pte;
}
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
unsigned long address = __fix_to_virt(idx);
if (idx >= __end_of_fixed_addresses)
BUG();
map_page(address, phys, pgprot_val(flags));
}