Commit d39af902 authored by Max Filippov

xtensa: add alternative kernel memory layouts

MMUv3 is able to support more than 128MB of low memory.
Implement 256MB and 512MB KSEG layouts:

- add Kconfig selector for KSEG layout;
- add KSEG base address, size and alignment definitions to
  arch/xtensa/include/asm/kmem_layout.h;
- use new definitions in TLB initialization;
- add build time memory map consistency checks.

See Documentation/xtensa/mmu.txt for the details of new memory layouts.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Parent f1883aa7
@@ -62,3 +62,127 @@ limitations apply:
6. The IO area covers the entire 256MB segment of parent-bus-address; the
   "ranges" triplet length field is ignored

MMUv3 address space layouts.
============================

Default MMUv2-compatible layout.

                      Symbol                   VADDR       Size
+------------------+
| Userspace        |                           0x00000000  TASK_SIZE
+------------------+                           0x40000000
+------------------+
| Page table       |                           0x80000000
+------------------+                           0x80400000
+------------------+
| KMAP area        |  PKMAP_BASE                           PTRS_PER_PTE *
|                  |                                       DCACHE_N_COLORS *
|                  |                                       PAGE_SIZE
|                  |                                       (4MB * DCACHE_N_COLORS)
+------------------+
| Atomic KMAP area |  FIXADDR_START                        KM_TYPE_NR *
|                  |                                       NR_CPUS *
|                  |                                       DCACHE_N_COLORS *
|                  |                                       PAGE_SIZE
+------------------+  FIXADDR_TOP              0xbffff000
+------------------+
| VMALLOC area     |  VMALLOC_START            0xc0000000  128MB - 64KB
+------------------+  VMALLOC_END
| Cache aliasing   |  TLBTEMP_BASE_1           0xc7ff0000  DCACHE_WAY_SIZE
| remap area 1     |
+------------------+
| Cache aliasing   |  TLBTEMP_BASE_2                       DCACHE_WAY_SIZE
| remap area 2     |
+------------------+
+------------------+
| Cached KSEG      |  XCHAL_KSEG_CACHED_VADDR  0xd0000000  128MB
+------------------+
| Uncached KSEG    |  XCHAL_KSEG_BYPASS_VADDR  0xd8000000  128MB
+------------------+
| Cached KIO       |  XCHAL_KIO_CACHED_VADDR   0xe0000000  256MB
+------------------+
| Uncached KIO     |  XCHAL_KIO_BYPASS_VADDR   0xf0000000  256MB
+------------------+

256MB cached + 256MB uncached layout.

                      Symbol                   VADDR       Size
+------------------+
| Userspace        |                           0x00000000  TASK_SIZE
+------------------+                           0x40000000
+------------------+
| Page table       |                           0x80000000
+------------------+                           0x80400000
+------------------+
| KMAP area        |  PKMAP_BASE                           PTRS_PER_PTE *
|                  |                                       DCACHE_N_COLORS *
|                  |                                       PAGE_SIZE
|                  |                                       (4MB * DCACHE_N_COLORS)
+------------------+
| Atomic KMAP area |  FIXADDR_START                        KM_TYPE_NR *
|                  |                                       NR_CPUS *
|                  |                                       DCACHE_N_COLORS *
|                  |                                       PAGE_SIZE
+------------------+  FIXADDR_TOP              0x9ffff000
+------------------+
| VMALLOC area     |  VMALLOC_START            0xa0000000  128MB - 64KB
+------------------+  VMALLOC_END
| Cache aliasing   |  TLBTEMP_BASE_1           0xa7ff0000  DCACHE_WAY_SIZE
| remap area 1     |
+------------------+
| Cache aliasing   |  TLBTEMP_BASE_2                       DCACHE_WAY_SIZE
| remap area 2     |
+------------------+
+------------------+
| Cached KSEG      |  XCHAL_KSEG_CACHED_VADDR  0xb0000000  256MB
+------------------+
| Uncached KSEG    |  XCHAL_KSEG_BYPASS_VADDR  0xc0000000  256MB
+------------------+
+------------------+
| Cached KIO       |  XCHAL_KIO_CACHED_VADDR   0xe0000000  256MB
+------------------+
| Uncached KIO     |  XCHAL_KIO_BYPASS_VADDR   0xf0000000  256MB
+------------------+

512MB cached + 512MB uncached layout.

                      Symbol                   VADDR       Size
+------------------+
| Userspace        |                           0x00000000  TASK_SIZE
+------------------+                           0x40000000
+------------------+
| Page table       |                           0x80000000
+------------------+                           0x80400000
+------------------+
| KMAP area        |  PKMAP_BASE                           PTRS_PER_PTE *
|                  |                                       DCACHE_N_COLORS *
|                  |                                       PAGE_SIZE
|                  |                                       (4MB * DCACHE_N_COLORS)
+------------------+
| Atomic KMAP area |  FIXADDR_START                        KM_TYPE_NR *
|                  |                                       NR_CPUS *
|                  |                                       DCACHE_N_COLORS *
|                  |                                       PAGE_SIZE
+------------------+  FIXADDR_TOP              0x8ffff000
+------------------+
| VMALLOC area     |  VMALLOC_START            0x90000000  128MB - 64KB
+------------------+  VMALLOC_END
| Cache aliasing   |  TLBTEMP_BASE_1           0x97ff0000  DCACHE_WAY_SIZE
| remap area 1     |
+------------------+
| Cache aliasing   |  TLBTEMP_BASE_2                       DCACHE_WAY_SIZE
| remap area 2     |
+------------------+
+------------------+
| Cached KSEG      |  XCHAL_KSEG_CACHED_VADDR  0xa0000000  512MB
+------------------+
| Uncached KSEG    |  XCHAL_KSEG_BYPASS_VADDR  0xc0000000  512MB
+------------------+
| Cached KIO       |  XCHAL_KIO_CACHED_VADDR   0xe0000000  256MB
+------------------+
| Uncached KIO     |  XCHAL_KIO_BYPASS_VADDR   0xf0000000  256MB
+------------------+
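
As an illustration of what these layouts mean for address translation, the sketch below (a standalone user-space C program, not part of the commit) mimics the linear KSEG mapping: a physical address inside [XCHAL_KSEG_PADDR, XCHAL_KSEG_PADDR + XCHAL_KSEG_SIZE) maps into the cached KSEG window at a fixed offset. The constants are copied from the 256MB layout above; the helper names (phys_is_lowmem, kseg_phys_to_virt) are made up for the example.

/* Standalone sketch of the linear KSEG mapping described above.
 * Constants follow the 256MB layout; helper names are illustrative only.
 */
#include <stdio.h>

#define KSEG_PADDR        0x00000000UL   /* CONFIG_KSEG_PADDR (default)     */
#define KSEG_CACHED_VADDR 0xb0000000UL   /* XCHAL_KSEG_CACHED_VADDR (256MB) */
#define KSEG_SIZE         0x10000000UL   /* XCHAL_KSEG_SIZE = 256MB         */

/* A physical address is "low memory" iff it falls inside the KSEG window. */
static int phys_is_lowmem(unsigned long pa)
{
        return pa >= KSEG_PADDR && pa < KSEG_PADDR + KSEG_SIZE;
}

/* Linear mapping: the whole window is offset by a single constant. */
static unsigned long kseg_phys_to_virt(unsigned long pa)
{
        return pa - KSEG_PADDR + KSEG_CACHED_VADDR;
}

int main(void)
{
        unsigned long pa = 0x00800000UL;        /* 8MB into physical memory */

        if (phys_is_lowmem(pa))
                printf("phys 0x%08lx -> virt 0x%08lx\n",
                       pa, kseg_phys_to_virt(pa));      /* 0xb0800000 */
        return 0;
}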
@@ -236,6 +236,50 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX

          If in doubt, say Y.

config KSEG_PADDR
        hex "Physical address of the KSEG mapping"
        depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX && MMU
        default 0x00000000
        help
          This is the physical address where KSEG is mapped. Please refer to
          the chosen KSEG layout help for the required address alignment.
          Unpacked kernel image (including vectors) must be located completely
          within KSEG.
          Physical memory below this address is not available to linux.

          If unsure, leave the default value here.

choice
        prompt "KSEG layout"
        depends on MMU
        default XTENSA_KSEG_MMU_V2

config XTENSA_KSEG_MMU_V2
        bool "MMUv2: 128MB cached + 128MB uncached"
        help
          MMUv2 compatible kernel memory map: TLB way 5 maps 128MB starting
          at KSEG_PADDR to 0xd0000000 with cache and to 0xd8000000
          without cache.
          KSEG_PADDR must be aligned to 128MB.

config XTENSA_KSEG_256M
        bool "256MB cached + 256MB uncached"
        depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
        help
          TLB way 6 maps 256MB starting at KSEG_PADDR to 0xb0000000
          with cache and to 0xc0000000 without cache.
          KSEG_PADDR must be aligned to 256MB.

config XTENSA_KSEG_512M
        bool "512MB cached + 512MB uncached"
        depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
        help
          TLB way 6 maps 512MB starting at KSEG_PADDR to 0xa0000000
          with cache and to 0xc0000000 without cache.
          KSEG_PADDR must be aligned to 256MB.

endchoice

config HIGHMEM
        bool "High Memory Support"
        depends on MMU
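
Each layout's help text states an alignment requirement for KSEG_PADDR: 128MB for the MMUv2-compatible layout, 256MB for the 256MB and 512MB layouts. The standalone sketch below (not kernel code) shows the mask test that decides whether a candidate value meets that requirement; it mirrors the preprocessor assertion added to the memory-layout header later in this commit.

/* Sketch: check a candidate CONFIG_KSEG_PADDR against the alignment that
 * the chosen KSEG layout requires (illustrative, not kernel code).
 */
#include <stdio.h>

static int is_aligned(unsigned long paddr, unsigned long alignment)
{
        /* alignment is a power of two, so a simple mask test suffices */
        return (paddr & (alignment - 1)) == 0;
}

int main(void)
{
        const unsigned long align_128m = 0x08000000UL;
        const unsigned long align_256m = 0x10000000UL;

        printf("0x00000000 ok for 256MB layout: %d\n",
               is_aligned(0x00000000UL, align_256m));   /* 1 */
        printf("0x08000000 ok for 128MB layout: %d\n",
               is_aligned(0x08000000UL, align_128m));   /* 1 */
        printf("0x08000000 ok for 256MB layout: %d\n",
               is_aligned(0x08000000UL, align_256m));   /* 0 */
        return 0;
}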
@@ -59,6 +59,11 @@ enum fixed_addresses {
 */
static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
        /* Check if this memory layout is broken because fixmap overlaps page
         * table.
         */
        BUILD_BUG_ON(FIXADDR_START <
                     XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE);
        BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
        return __fix_to_virt(idx);
}
@@ -68,6 +68,11 @@ void kunmap_high(struct page *page);
static inline void *kmap(struct page *page)
{
        /* Check if this memory layout is broken because PKMAP overlaps
         * page table.
         */
        BUILD_BUG_ON(PKMAP_BASE <
                     XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE);
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return page_address(page);
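
Both of the checks above (in fix_to_virt and in kmap) follow the same pattern: turn a memory-layout mistake into a compile-time error instead of a runtime surprise. A minimal standalone illustration of that pattern, using a hand-rolled static assertion instead of the kernel's BUILD_BUG_ON and an example FIXADDR_START chosen only for the demonstration:

/* Sketch of a compile-time layout check in the spirit of the BUILD_BUG_ON
 * calls above; the page-table window matches this commit, FIXADDR_START is
 * an arbitrary example value, not one taken from a real configuration.
 */
#define PAGE_TABLE_VADDR 0x80000000UL
#define PAGE_TABLE_SIZE  0x00400000UL
#define FIXADDR_START    0xbfe00000UL   /* example value for illustration */

/* Fails to compile (negative array size) if the condition is false. */
#define LAYOUT_ASSERT(cond, name) \
        typedef char layout_assert_##name[(cond) ? 1 : -1]

/* The fixmap must start above the end of the page-table window. */
LAYOUT_ASSERT(FIXADDR_START >= PAGE_TABLE_VADDR + PAGE_TABLE_SIZE,
              fixmap_above_page_table);

int main(void)
{
        return 0;       /* nothing to do at runtime; the check is at build time */
}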
@@ -116,22 +116,35 @@
        add     a5, a5, a4
        bne     a5, a2, 3b

-       /* Step 4: Setup MMU with the old V2 mappings. */
+       /* Step 4: Setup MMU with the requested static mappings. */
        movi    a6, 0x01000000
        wsr     a6, ITLBCFG
        wsr     a6, DTLBCFG
        isync

-       movi    a5, 0xd0000005
-       movi    a4, CA_WRITEBACK
+       movi    a5, XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY
+       movi    a4, XCHAL_KSEG_PADDR + CA_WRITEBACK
+       wdtlb   a4, a5
+       witlb   a4, a5
+
+       movi    a5, XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY
+       movi    a4, XCHAL_KSEG_PADDR + CA_BYPASS
        wdtlb   a4, a5
        witlb   a4, a5

-       movi    a5, 0xd8000005
-       movi    a4, CA_BYPASS
+#ifdef CONFIG_XTENSA_KSEG_512M
+       movi    a5, XCHAL_KSEG_CACHED_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
+       movi    a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_WRITEBACK
        wdtlb   a4, a5
        witlb   a4, a5
+
+       movi    a5, XCHAL_KSEG_BYPASS_VADDR + 0x10000000 + XCHAL_KSEG_TLB_WAY
+       movi    a4, XCHAL_KSEG_PADDR + 0x10000000 + CA_BYPASS
+       wdtlb   a4, a5
+       witlb   a4, a5
+#endif

        movi    a5, XCHAL_KIO_CACHED_VADDR + 6
        movi    a4, XCHAL_KIO_DEFAULT_PADDR + CA_WRITEBACK
        wdtlb   a4, a5
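
In these wdtlb/witlb sequences a5 carries the mapping's virtual address combined with the TLB way, and a4 carries the physical address combined with the cache attribute, so the removed hard-coded a5 constants are simply the MMUv2-layout values of the new symbolic expressions. A small standalone check of that arithmetic, using only values visible in this diff:

/* Sketch: show that the new symbolic a5 operands reduce to the old
 * hard-coded MMUv2 constants (standalone C, values taken from the diff).
 */
#include <stdio.h>

#define XCHAL_KSEG_CACHED_VADDR 0xd0000000UL    /* MMUv2 layout */
#define XCHAL_KSEG_BYPASS_VADDR 0xd8000000UL
#define XCHAL_KSEG_TLB_WAY      5

int main(void)
{
        /* vaddr + way is what gets written to a5 before wdtlb/witlb */
        unsigned long cached = XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_TLB_WAY;
        unsigned long bypass = XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_TLB_WAY;

        printf("cached KSEG entry: 0x%08lx\n", cached);  /* 0xd0000005 */
        printf("bypass KSEG entry: 0x%08lx\n", bypass);  /* 0xd8000005 */
        return 0;
}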
@@ -13,13 +13,59 @@

#include <asm/types.h>

#ifdef CONFIG_MMU

/*
 * Fixed TLB translations in the processor.
 */

#define XCHAL_PAGE_TABLE_VADDR   __XTENSA_UL_CONST(0x80000000)
#define XCHAL_PAGE_TABLE_SIZE    __XTENSA_UL_CONST(0x00400000)

#if defined(CONFIG_XTENSA_KSEG_MMU_V2)

#define XCHAL_KSEG_CACHED_VADDR  __XTENSA_UL_CONST(0xd0000000)
#define XCHAL_KSEG_BYPASS_VADDR  __XTENSA_UL_CONST(0xd8000000)
#define XCHAL_KSEG_SIZE          __XTENSA_UL_CONST(0x08000000)
#define XCHAL_KSEG_ALIGNMENT     __XTENSA_UL_CONST(0x08000000)
#define XCHAL_KSEG_TLB_WAY       5

#elif defined(CONFIG_XTENSA_KSEG_256M)

#define XCHAL_KSEG_CACHED_VADDR  __XTENSA_UL_CONST(0xb0000000)
#define XCHAL_KSEG_BYPASS_VADDR  __XTENSA_UL_CONST(0xc0000000)
#define XCHAL_KSEG_SIZE          __XTENSA_UL_CONST(0x10000000)
#define XCHAL_KSEG_ALIGNMENT     __XTENSA_UL_CONST(0x10000000)
#define XCHAL_KSEG_TLB_WAY       6

#elif defined(CONFIG_XTENSA_KSEG_512M)

#define XCHAL_KSEG_CACHED_VADDR  __XTENSA_UL_CONST(0xa0000000)
#define XCHAL_KSEG_BYPASS_VADDR  __XTENSA_UL_CONST(0xc0000000)
#define XCHAL_KSEG_SIZE          __XTENSA_UL_CONST(0x20000000)
#define XCHAL_KSEG_ALIGNMENT     __XTENSA_UL_CONST(0x10000000)
#define XCHAL_KSEG_TLB_WAY       6

#else
#error Unsupported KSEG configuration
#endif

#ifdef CONFIG_KSEG_PADDR
#define XCHAL_KSEG_PADDR         __XTENSA_UL_CONST(CONFIG_KSEG_PADDR)
#else
#define XCHAL_KSEG_PADDR         __XTENSA_UL_CONST(0x00000000)
#endif

#if XCHAL_KSEG_PADDR & (XCHAL_KSEG_ALIGNMENT - 1)
#error XCHAL_KSEG_PADDR is not properly aligned to XCHAL_KSEG_ALIGNMENT
#endif

#else

#define XCHAL_KSEG_CACHED_VADDR  __XTENSA_UL_CONST(0xd0000000)
#define XCHAL_KSEG_BYPASS_VADDR  __XTENSA_UL_CONST(0xd8000000)
#define XCHAL_KSEG_SIZE          __XTENSA_UL_CONST(0x08000000)

#endif
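
Taken together, these definitions pin down one cached and one uncached window per layout. The standalone sketch below re-declares the three layouts' constants as a table (values copied from this header) and prints the virtual ranges each one covers, which is a quick way to cross-check them against the diagrams in Documentation/xtensa/mmu.txt.

/* Sketch: tabulate the three KSEG layouts defined above and print the
 * cached/uncached virtual ranges they cover (values copied from the header).
 */
#include <stddef.h>
#include <stdio.h>

struct kseg_layout {
        const char *name;
        unsigned long cached_vaddr;
        unsigned long bypass_vaddr;
        unsigned long size;
        int tlb_way;
};

static const struct kseg_layout layouts[] = {
        { "MMUv2 128MB", 0xd0000000UL, 0xd8000000UL, 0x08000000UL, 5 },
        { "256MB",       0xb0000000UL, 0xc0000000UL, 0x10000000UL, 6 },
        { "512MB",       0xa0000000UL, 0xc0000000UL, 0x20000000UL, 6 },
};

int main(void)
{
        size_t i;

        for (i = 0; i < sizeof(layouts) / sizeof(layouts[0]); i++) {
                const struct kseg_layout *l = &layouts[i];

                printf("%-12s way %d  cached 0x%08lx-0x%08lx  bypass 0x%08lx-0x%08lx\n",
                       l->name, l->tlb_way,
                       l->cached_vaddr, l->cached_vaddr + l->size - 1,
                       l->bypass_vaddr, l->bypass_vaddr + l->size - 1);
        }
        return 0;
}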
@@ -27,10 +27,12 @@
#ifdef CONFIG_MMU
#define PAGE_OFFSET     XCHAL_KSEG_CACHED_VADDR
-#define MAX_MEM_PFN     XCHAL_KSEG_SIZE
+#define MAX_LOW_PFN     (PHYS_PFN(XCHAL_KSEG_PADDR) + \
+                         PHYS_PFN(XCHAL_KSEG_SIZE))
#else
#define PAGE_OFFSET     __XTENSA_UL_CONST(0)
-#define MAX_MEM_PFN     (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
+#define MAX_LOW_PFN     (PHYS_PFN(PLATFORM_DEFAULT_MEM_START) + \
+                         PHYS_PFN(PLATFORM_DEFAULT_MEM_SIZE))
#endif

#define PGTABLE_START   0x80000000
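
MAX_LOW_PFN is now the page frame number one past the end of the KSEG window rather than a byte size, which is what lets bootmem_init() clamp max_low_pfn with a plain min() later in this commit. A worked example as a standalone sketch, assuming 4KB pages (PAGE_SHIFT = 12, so PHYS_PFN(addr) is addr >> 12), the 256MB KSEG layout and KSEG_PADDR = 0:

/* Worked example of the new MAX_LOW_PFN definition, assuming 4KB pages
 * and the 256MB KSEG layout with KSEG_PADDR = 0.
 */
#include <stdio.h>

#define PAGE_SHIFT        12
#define PHYS_PFN(a)       ((unsigned long)(a) >> PAGE_SHIFT)

#define XCHAL_KSEG_PADDR  0x00000000UL
#define XCHAL_KSEG_SIZE   0x10000000UL          /* 256MB */

#define MAX_LOW_PFN       (PHYS_PFN(XCHAL_KSEG_PADDR) + PHYS_PFN(XCHAL_KSEG_SIZE))

int main(void)
{
        unsigned long max_pfn = PHYS_PFN(0x20000000UL); /* board with 512MB RAM */
        unsigned long max_low_pfn = max_pfn < MAX_LOW_PFN ? max_pfn : MAX_LOW_PFN;

        /* 512MB of RAM but a 256MB KSEG: low memory is clamped to 0x10000 pages */
        printf("MAX_LOW_PFN = 0x%lx, max_low_pfn = 0x%lx\n",
               (unsigned long)MAX_LOW_PFN, max_low_pfn);
        return 0;
}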
@@ -18,7 +18,8 @@
# define __XTENSA_UL_CONST(x)   x
#else
# define __XTENSA_UL(x)         ((unsigned long)(x))
-# define __XTENSA_UL_CONST(x)   x##UL
+# define ___XTENSA_UL_CONST(x)  x##UL
+# define __XTENSA_UL_CONST(x)   ___XTENSA_UL_CONST(x)
#endif

#ifndef __ASSEMBLY__
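
The extra ___XTENSA_UL_CONST level matters because the argument can itself be a macro (CONFIG_KSEG_PADDR above): with the single-level definition the ## paste happens before the argument is expanded, producing a bogus CONFIG_KSEG_PADDRUL token, whereas the indirection forces expansion first. A minimal standalone demonstration of the same pattern, with generic names rather than the kernel's:

/* Demonstration of why the indirection is needed when the argument is
 * itself a macro (standalone sketch; names are generic, not the kernel's).
 */
#include <stdio.h>

#define ADDR_CONFIG      0x12340000     /* stands in for CONFIG_KSEG_PADDR */

/* Two-level version: ADDR_CONFIG is expanded before the ## paste. */
#define UL_CONST_RAW(x)  x##UL
#define UL_CONST(x)      UL_CONST_RAW(x)

int main(void)
{
        /* expands to 0x12340000UL */
        unsigned long ok = UL_CONST(ADDR_CONFIG);

        /* UL_CONST_RAW(ADDR_CONFIG) would paste into the undefined token
         * ADDR_CONFIGUL and fail to compile, so it is left commented out.
         */
        printf("0x%08lx\n", ok);
        return 0;
}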
@@ -266,8 +266,7 @@ void __init bootmem_init(void)
        if (min_low_pfn > max_pfn)
                panic("No memory found!\n");

-       max_low_pfn = max_pfn < MAX_MEM_PFN >> PAGE_SHIFT ?
-               max_pfn : MAX_MEM_PFN >> PAGE_SHIFT;
+       max_low_pfn = min(max_pfn, MAX_LOW_PFN);

        /* Find an area to use for the bootmem bitmap. */