Commit ac55c768 authored by David S. Miller

sparc64: Switch to 4-level page tables.

This has become necessary with chips that support more than 43-bits
of physical addressing.

Based almost entirely upon a patch by Bob Picco.
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Bob Picco <bob.picco@oracle.com>
Parent 473ad7f4
@@ -57,18 +57,21 @@ void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topa
 typedef struct { unsigned long pte; } pte_t;
 typedef struct { unsigned long iopte; } iopte_t;
 typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pud; } pud_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;

 #define pte_val(x)	((x).pte)
 #define iopte_val(x)	((x).iopte)
 #define pmd_val(x)	((x).pmd)
+#define pud_val(x)	((x).pud)
 #define pgd_val(x)	((x).pgd)
 #define pgprot_val(x)	((x).pgprot)

 #define __pte(x)	((pte_t) { (x) } )
 #define __iopte(x)	((iopte_t) { (x) } )
 #define __pmd(x)	((pmd_t) { (x) } )
+#define __pud(x)	((pud_t) { (x) } )
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
@@ -77,18 +80,21 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 typedef unsigned long pte_t;
 typedef unsigned long iopte_t;
 typedef unsigned long pmd_t;
+typedef unsigned long pud_t;
 typedef unsigned long pgd_t;
 typedef unsigned long pgprot_t;

 #define pte_val(x)	(x)
 #define iopte_val(x)	(x)
 #define pmd_val(x)	(x)
+#define pud_val(x)	(x)
 #define pgd_val(x)	(x)
 #define pgprot_val(x)	(x)

 #define __pte(x)	(x)
 #define __iopte(x)	(x)
 #define __pmd(x)	(x)
+#define __pud(x)	(x)
 #define __pgd(x)	(x)
 #define __pgprot(x)	(x)
......
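Both variants above gain the new pud level: the strict-typecheck flavour, where each table level is a single-member struct of its own, and the plain unsigned long fallback. A minimal userspace sketch (not kernel code) of what the struct wrapper buys; dump_pud() is a made-up consumer for the demo:

/* Illustrative only: with the struct wrappers each table level is a
 * distinct type, so handing a pgd_t to something that expects a pud_t
 * is a compile error rather than a silent mix-up.
 */
#include <stdio.h>

typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;

#define pud_val(x) ((x).pud)
#define __pud(x)   ((pud_t) { (x) })
#define __pgd(x)   ((pgd_t) { (x) })

static void dump_pud(pud_t p)
{
	printf("pud entry = %#lx\n", pud_val(p));
}

int main(void)
{
	pud_t pud = __pud(0x12340000UL);
	pgd_t pgd = __pgd(0x56780000UL);

	dump_pud(pud);		/* fine */
	/* dump_pud(pgd); */	/* would not compile: pgd_t is a different struct */
	(void)pgd;
	return 0;
}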
@@ -15,6 +15,13 @@
 extern struct kmem_cache *pgtable_cache;

+static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
+{
+	pgd_set(pgd, pud);
+}
+
+#define pgd_populate(MM, PGD, PUD)	__pgd_populate(PGD, PUD)
+
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
@@ -25,7 +32,23 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	kmem_cache_free(pgtable_cache, pgd);
 }

-#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
+static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
+{
+	pud_set(pud, pmd);
+}
+
+#define pud_populate(MM, PUD, PMD)	__pud_populate(PUD, PMD)
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return kmem_cache_alloc(pgtable_cache,
+				GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+	kmem_cache_free(pgtable_cache, pud);
+}
+
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
@@ -91,4 +114,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pte_t *pte,
 #define __pmd_free_tlb(tlb, pmd, addr)	\
 	pgtable_free_tlb(tlb, pmd, false)

+#define __pud_free_tlb(tlb, pud, addr)	\
+	pgtable_free_tlb(tlb, pud, false)
+
 #endif /* _SPARC64_PGALLOC_H */
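The new helpers follow the same allocate-then-populate pattern as the existing pmd ones: pud_alloc_one() hands back a page-sized table from pgtable_cache, and pgd_populate()/pud_populate() link it into the entry one level up. A rough userspace model of that pattern; alloc_table() and populate() are stand-in names, and unlike the kernel (which stores a physical address via pgd_set()/pud_set()) this model just stores a pointer:

#include <stdlib.h>

#define PTRS_PER_TABLE	1024UL	/* one 8K page of 8-byte entries */

/* stands in for kmem_cache_alloc(pgtable_cache, ...): a zeroed, page-sized table */
static unsigned long *alloc_table(void)
{
	return calloc(PTRS_PER_TABLE, sizeof(unsigned long));
}

/* stands in for pgd_populate()/pud_populate(): link the lower table into the entry above */
static void populate(unsigned long *entry, unsigned long *lower)
{
	*entry = (unsigned long)lower;
}

int main(void)
{
	unsigned long *pgd = alloc_table();
	unsigned long *pud = alloc_table();

	if (!pgd || !pud)
		return 1;
	populate(&pgd[0], pud);	/* pgd entry 0 now refers to the pud table */
	free(pud);
	free(pgd);
	return 0;
}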
@@ -20,8 +20,6 @@
 #include <asm/page.h>
 #include <asm/processor.h>

-#include <asm-generic/pgtable-nopud.h>
-
 /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
  * The page copy blockops can use 0x6000000 to 0x8000000.
  * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
@@ -55,13 +53,21 @@
 #define PMD_MASK	(~(PMD_SIZE-1))
 #define PMD_BITS	(PAGE_SHIFT - 3)

-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
+/* PUD_SHIFT determines the size of the area a third-level page
+ * table can map
+ */
+#define PUD_SHIFT	(PMD_SHIFT + PMD_BITS)
+#define PUD_SIZE	(_AC(1,UL) << PUD_SHIFT)
+#define PUD_MASK	(~(PUD_SIZE-1))
+#define PUD_BITS	(PAGE_SHIFT - 3)
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT	(PUD_SHIFT + PUD_BITS)
 #define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 #define PGDIR_BITS	(PAGE_SHIFT - 3)

-#if (PGDIR_SHIFT + PGDIR_BITS) != 43
+#if (PGDIR_SHIFT + PGDIR_BITS) != 53
 #error Page table parameters do not cover virtual address space properly.
 #endif
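With the default sparc64 8K pages (PAGE_SHIFT of 13), each page-sized table holds 2^10 eight-byte entries, so every level consumes 10 bits of virtual address and the arithmetic above lands exactly on the new 53-bit check. A small sketch of that arithmetic, assuming those values:

/* Sketch of the level arithmetic, assuming PAGE_SHIFT = 13 (8K pages).
 * Each table is one page of 8-byte entries, so every level indexes
 * PAGE_SHIFT - 3 = 10 bits of virtual address.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT  13
#define PMD_SHIFT   (PAGE_SHIFT + (PAGE_SHIFT - 3))	/* 23 */
#define PMD_BITS    (PAGE_SHIFT - 3)			/* 10 */
#define PUD_SHIFT   (PMD_SHIFT + PMD_BITS)		/* 33 */
#define PUD_BITS    (PAGE_SHIFT - 3)			/* 10 */
#define PGDIR_SHIFT (PUD_SHIFT + PUD_BITS)		/* 43 */
#define PGDIR_BITS  (PAGE_SHIFT - 3)			/* 10 */

int main(void)
{
	/* 13 + 10 + 10 + 10 + 10 = 53 bits of virtual address covered,
	 * versus 43 bits with the old three-level layout. */
	assert(PGDIR_SHIFT + PGDIR_BITS == 53);
	printf("PMD_SHIFT=%d PUD_SHIFT=%d PGDIR_SHIFT=%d, covers %d bits\n",
	       PMD_SHIFT, PUD_SHIFT, PGDIR_SHIFT, PGDIR_SHIFT + PGDIR_BITS);
	return 0;
}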
@@ -93,6 +99,7 @@ static inline bool kern_addr_valid(unsigned long addr)
 /* Entries per page directory level. */
 #define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
 #define PTRS_PER_PMD	(1UL << PMD_BITS)
+#define PTRS_PER_PUD	(1UL << PUD_BITS)
 #define PTRS_PER_PGD	(1UL << PGDIR_BITS)

 /* Kernel has a separate 44bit address space. */
@@ -101,6 +108,9 @@ static inline bool kern_addr_valid(unsigned long addr)
 #define pmd_ERROR(e)						\
 	pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",	\
 	       __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
+#define pud_ERROR(e)						\
+	pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n",	\
+	       __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
 #define pgd_ERROR(e)						\
 	pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",	\
 	       __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
@@ -779,6 +789,11 @@ static inline int pmd_present(pmd_t pmd)
 #define pud_bad(pud)		((pud_val(pud) & ~PAGE_MASK) || \
 				 !__kern_addr_valid(pud_val(pud)))

+#define pgd_none(pgd)		(!pgd_val(pgd))
+
+#define pgd_bad(pgd)		((pgd_val(pgd) & ~PAGE_MASK) || \
+				 !__kern_addr_valid(pgd_val(pgd)))
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		pmd_t *pmdp, pmd_t pmd);
@@ -815,10 +830,17 @@ static inline unsigned long __pmd_page(pmd_t pmd)
 #define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0UL)
 #define pud_present(pud)	(pud_val(pud) != 0U)
 #define pud_clear(pudp)		(pud_val(*(pudp)) = 0UL)
+#define pgd_page_vaddr(pgd)	\
+	((unsigned long) __va(pgd_val(pgd)))
+#define pgd_present(pgd)	(pgd_val(pgd) != 0U)
+#define pgd_clear(pgdp)		(pgd_val(*(pgd)) = 0UL)

 /* Same in both SUN4V and SUN4U.  */
 #define pte_none(pte)		(!pte_val(pte))

+#define pgd_set(pgdp, pudp)	\
+	(pgd_val(*(pgdp)) = (__pa((unsigned long) (pudp))))
+
 /* to find an entry in a page-table-directory. */
 #define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
 #define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
@@ -826,6 +848,11 @@ static inline unsigned long __pmd_page(pmd_t pmd)
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)

+/* Find an entry in the third-level page table.. */
+#define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+#define pud_offset(pgdp, address)	\
+	((pud_t *) pgd_page_vaddr(*(pgdp)) + pud_index(address))
+
 /* Find an entry in the second-level page table.. */
 #define pmd_offset(pudp, address)	\
 	((pmd_t *) pud_page_vaddr(*(pudp)) + \
......
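Note that pgd_set() above stores the physical address of the pud table (via __pa()), and pgd_page_vaddr() turns the stored value back into a kernel pointer with __va() before pud_index() is added, mirroring what pud_set()/pud_page_vaddr() already did one level down. A toy model of that round trip; FAKE_PAGE_OFFSET is a made-up constant purely for illustration, not the real sparc64 value:

#include <assert.h>
#include <stdint.h>

#define FAKE_PAGE_OFFSET 0xfff8000000000000ULL	/* made-up, for illustration only */

static uint64_t fake_pa(uint64_t vaddr) { return vaddr - FAKE_PAGE_OFFSET; }
static uint64_t fake_va(uint64_t paddr) { return paddr + FAKE_PAGE_OFFSET; }

int main(void)
{
	uint64_t pud_table = FAKE_PAGE_OFFSET + 0x123000;	/* pretend kernel address of a pud table */
	uint64_t pgd_entry = fake_pa(pud_table);		/* what pgd_set() stores: a physical address */

	assert(fake_va(pgd_entry) == pud_table);		/* what pgd_page_vaddr() hands back */
	return 0;
}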
@@ -144,6 +144,11 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
 	andn	REG2, 0x7, REG2; \
 	ldx	[REG1 + REG2], REG1; \
+	brz,pn	REG1, FAIL_LABEL; \
+	sllx	VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \
+	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
+	andn	REG2, 0x7, REG2; \
+	ldxa	[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
 	brz,pn	REG1, FAIL_LABEL; \
 	sllx	VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
 	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
@@ -197,6 +202,11 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
 	andn	REG2, 0x7, REG2; \
 	ldxa	[PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
+	brz,pn	REG1, FAIL_LABEL; \
+	sllx	VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2; \
+	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
+	andn	REG2, 0x7, REG2; \
+	ldxa	[REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
 	brz,pn	REG1, FAIL_LABEL; \
 	sllx	VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
 	srlx	REG2, 64 - PAGE_SHIFT, REG2; \
......
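The added instructions in both walkers repeat, for the new pud level, the same index-extraction idiom used at every level: shift left to discard the bits above this level's span, shift right so the index lands already scaled by the 8-byte entry size, then clear the low three bits. A C rendering of what that pair of shifts computes, assuming the 8K-page values (PAGE_SHIFT 13, PUD_SHIFT 33):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	13			/* assumed 8K pages */
#define PUD_SHIFT	33			/* PMD_SHIFT + PMD_BITS with 8K pages */
#define PUD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PUD	(1ULL << PUD_BITS)

/* C rendering of the three added instructions:
 *   sllx VADDR, 64 - (PUD_SHIFT + PUD_BITS), REG2
 *   srlx REG2, 64 - PAGE_SHIFT, REG2
 *   andn REG2, 0x7, REG2
 * The result is the byte offset of the pud entry inside its 8K table.
 */
static uint64_t pud_entry_offset(uint64_t vaddr)
{
	uint64_t off = vaddr << (64 - (PUD_SHIFT + PUD_BITS));	/* sllx */
	off >>= 64 - PAGE_SHIFT;				/* srlx */
	return off & ~(uint64_t)0x7;				/* andn */
}

int main(void)
{
	uint64_t vaddr = 0x123456789abcULL;
	uint64_t want = ((vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) * 8;

	assert(pud_entry_offset(vaddr) == want);
	return 0;
}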
@@ -1467,6 +1467,13 @@ static void __init pcpu_populate_pte(unsigned long addr)
 	pud_t *pud;
 	pmd_t *pmd;

+	if (pgd_none(*pgd)) {
+		pud_t *new;
+
+		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+		pgd_populate(&init_mm, pgd, new);
+	}
+
 	pud = pud_offset(pgd, addr);
 	if (pud_none(*pud)) {
 		pmd_t *new;
......
@@ -1390,6 +1390,13 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
 	pmd_t *pmd;
 	pte_t *pte;

+	if (pgd_none(*pgd)) {
+		pud_t *new;
+
+		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+		alloc_bytes += PAGE_SIZE;
+		pgd_populate(&init_mm, pgd, new);
+	}
 	pud = pud_offset(pgd, vstart);
 	if (pud_none(*pud)) {
 		pmd_t *new;
@@ -1856,7 +1863,12 @@ static void __init sun4v_linear_pte_xor_finalize(void)
 /* paging_init() sets up the page tables */

 static unsigned long last_valid_pfn;
-pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/* These must be page aligned in order to not trigger the
+ * alignment tests of pgd_bad() and pud_bad().
+ */
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((aligned (PAGE_SIZE)));
+static pud_t swapper_pud_dir[PTRS_PER_PUD] __attribute__ ((aligned (PAGE_SIZE)));

 static void sun4u_pgprot_init(void);
 static void sun4v_pgprot_init(void);
@@ -1911,6 +1923,8 @@ void __init paging_init(void)
 {
 	unsigned long end_pfn, shift, phys_base;
 	unsigned long real_end, i;
+	pud_t *pud;
+	pmd_t *pmd;
 	int node;

 	setup_page_offset();
@@ -2008,9 +2022,18 @@ void __init paging_init(void)
 	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

-	/* Now can init the kernel/bad page tables. */
-	pud_set(pud_offset(&swapper_pg_dir[0], 0),
-		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
+	/* The kernel page tables we publish into what the rest of the
+	 * world sees must be adjusted so that they see the PAGE_OFFSET
+	 * address of these in-kerenel data structures.  However right
+	 * here we must access them from the kernel image side, because
+	 * the trap tables haven't been taken over and therefore we cannot
+	 * take TLB misses in the PAGE_OFFSET linear mappings yet.
+	 */
+	pud = swapper_pud_dir + (shift / sizeof(pud_t));
+	pgd_set(&swapper_pg_dir[0], pud);
+
+	pmd = swapper_low_pmd_dir + (shift / sizeof(pmd_t));
+	pud_set(&swapper_pud_dir[0], pmd);

 	inherit_prom_mappings();
......
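One consequence worth spelling out: pgd_bad() and pud_bad() treat any bits below the page mask in an entry as an error (besides the __kern_addr_valid() check), and since pgd_set()/pud_set() store __pa() of the next table directly, the tables themselves must be page aligned. That is exactly what the new __attribute__((aligned(PAGE_SIZE))) on swapper_pg_dir and swapper_pud_dir guarantees. A tiny sketch of the alignment part of that check, assuming 8K pages:

/* Sketch of the alignment test that motivates the page-aligned
 * swapper_pud_dir: an upper-level entry with any bits set below the
 * page mask is reported as bad.  Assumes 8K pages (PAGE_SHIFT = 13);
 * the real macros also require __kern_addr_valid() to pass.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 13
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static int entry_bad(uint64_t val)
{
	return (val & ~PAGE_MASK) != 0;	/* the pgd_bad()/pud_bad() shape */
}

int main(void)
{
	printf("page-aligned table:  %s\n", entry_bad(0x40000000UL) ? "bad" : "ok");
	printf("misaligned table:    %s\n", entry_bad(0x40000040UL) ? "bad" : "ok");
	return 0;
}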