Commit dcc1e8dd authored by David S. Miller, committed by David S. Miller

[SPARC64]: Add a secondary TSB for hugepage mappings.

Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 14778d90
@@ -175,11 +175,11 @@ config HUGETLB_PAGE_SIZE_4MB
 	bool "4MB"
 
 config HUGETLB_PAGE_SIZE_512K
-	depends on !SPARC64_PAGE_SIZE_4MB
+	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
 	bool "512K"
 
 config HUGETLB_PAGE_SIZE_64K
-	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
+	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB && !SPARC64_PAGE_SIZE_64K
 	bool "64K"
 
 endchoice
...
@@ -29,15 +29,15 @@
  *
  * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
  * tsb_base = tsb_reg & ~0x7UL;
- * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
+ * tsb_index = ((vaddr >> HASH_SHIFT) & tsb_mask);
  * tsb_ptr = tsb_base + (tsb_index * 16);
  */
-#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, TMP1, TMP2) \
+#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, HASH_SHIFT, TMP1, TMP2) \
 	and	TSB_PTR, 0x7, TMP1;		\
 	mov	512, TMP2;			\
 	andn	TSB_PTR, 0x7, TSB_PTR;		\
 	sllx	TMP2, TMP1, TMP2;		\
-	srlx	VADDR, PAGE_SHIFT, TMP1;	\
+	srlx	VADDR, HASH_SHIFT, TMP1;	\
 	sub	TMP2, 1, TMP2;			\
 	and	TMP1, TMP2, TMP1;		\
 	sllx	TMP1, 4, TMP1;			\
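For reference, here is the macro's effect written out in plain C — a sketch mirroring the comment above, with the new HASH_SHIFT parameter made explicit (PAGE_SHIFT for the base TSB, HPAGE_SHIFT for the huge page one). tsb_reg packs the TSB base address into the upper bits and a size index into the low three bits:

/* C rendering of COMPUTE_TSB_PTR: a size index of N in the low bits
 * of tsb_reg means the TSB holds 512 << N entries of 16 bytes each
 * (tag + pte).
 */
static unsigned long compute_tsb_ptr(unsigned long tsb_reg,
				     unsigned long vaddr,
				     unsigned long hash_shift)
{
	unsigned long index_mask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
	unsigned long tsb_base = tsb_reg & ~0x7UL;
	unsigned long tsb_index = (vaddr >> hash_shift) & index_mask;

	return tsb_base + (tsb_index * 16);
}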
@@ -53,7 +53,7 @@ sun4v_itlb_miss:
 	LOAD_ITLB_INFO(%g2, %g4, %g5)
 	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v)
-	COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
 
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
 	ldda	[%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -99,7 +99,7 @@ sun4v_dtlb_miss:
 	LOAD_DTLB_INFO(%g2, %g4, %g5)
 	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v)
-	COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
 
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
 	ldda	[%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -171,21 +171,26 @@ sun4v_dtsb_miss:
 
 	/* fallthrough */
 
-	/* Create TSB pointer into %g1.  This is something like:
-	 *
-	 * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
-	 * tsb_base = tsb_reg & ~0x7UL;
-	 * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
-	 * tsb_ptr = tsb_base + (tsb_index * 16);
-	 */
 sun4v_tsb_miss_common:
-	COMPUTE_TSB_PTR(%g1, %g4, %g5, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g5, %g7)
 
-	/* Branch directly to page table lookup.  We have SCRATCHPAD_MMU_MISS
-	 * still in %g2, so it's quite trivial to get at the PGD PHYS value
-	 * so we can preload it into %g7.
-	 */
 	sub	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+#ifdef CONFIG_HUGETLB_PAGE
+	mov	SCRATCHPAD_UTSBREG2, %g5
+	ldxa	[%g5] ASI_SCRATCHPAD, %g5
+	cmp	%g5, -1
+	be,pt	%xcc, 80f
+	 nop
+	COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)
+
+	/* That clobbered %g2, reload it.  */
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	sub	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+80:	stx	%g5, [%g2 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+#endif
+
 	ba,pt	%xcc, tsb_miss_page_table_walk_sun4v_fastpath
 	 ldx	[%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
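In rough C terms, the block added under CONFIG_HUGETLB_PAGE precomputes the huge page TSB entry before the handler falls into the page table walk. A sketch, not the kernel code, reusing the compute_tsb_ptr() sketch above; the scratchpad register SCRATCHPAD_UTSBREG2 holds the huge TSB register value, with -1 meaning the address space has no huge page TSB:

/* Sketch of the added sun4v logic: derive the huge-TSB entry pointer
 * (or keep -1 for "none") so it can be stashed in
 * TRAP_PER_CPU_TSB_HUGE_TEMP for the page-table-walk path to reuse.
 */
static unsigned long pick_huge_tsb_entry(unsigned long huge_tsb_reg,
					 unsigned long vaddr)
{
	if (huge_tsb_reg == -1UL)
		return -1UL;	/* no huge page TSB configured */
	return compute_tsb_ptr(huge_tsb_reg, vaddr, 22 /* HPAGE_SHIFT, 4MB */);
}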
...
@@ -2482,6 +2482,7 @@ void init_cur_cpu_trap(struct thread_info *t)
 
 extern void thread_info_offsets_are_bolixed_dave(void);
 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
+extern void tsb_config_offsets_are_bolixed_dave(void);
 
 /* Only invoked on boot processor. */
 void __init trap_init(void)
@@ -2535,9 +2536,27 @@ void __init trap_init(void)
 	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
 	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
 	    (TRAP_PER_CPU_CPU_LIST_PA !=
-	     offsetof(struct trap_per_cpu, cpu_list_pa)))
+	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+	    (TRAP_PER_CPU_TSB_HUGE !=
+	     offsetof(struct trap_per_cpu, tsb_huge)) ||
+	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+	     offsetof(struct trap_per_cpu, tsb_huge_temp)))
 		trap_per_cpu_offsets_are_bolixed_dave();
 
+	if ((TSB_CONFIG_TSB !=
+	     offsetof(struct tsb_config, tsb)) ||
+	    (TSB_CONFIG_RSS_LIMIT !=
+	     offsetof(struct tsb_config, tsb_rss_limit)) ||
+	    (TSB_CONFIG_NENTRIES !=
+	     offsetof(struct tsb_config, tsb_nentries)) ||
+	    (TSB_CONFIG_REG_VAL !=
+	     offsetof(struct tsb_config, tsb_reg_val)) ||
+	    (TSB_CONFIG_MAP_VADDR !=
+	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
+	    (TSB_CONFIG_MAP_PTE !=
+	     offsetof(struct tsb_config, tsb_map_pte)))
+		tsb_config_offsets_are_bolixed_dave();
+
 	/* Attach to the address space of init_task.  On SMP we
 	 * do this in smp.c:smp_callin for other cpus.
 	 */
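The *_are_bolixed_dave() functions are declared but deliberately never defined: when the offsetof() comparisons are compile-time false, the calls are optimized away, and any drift between the hand-maintained assembler offsets and the real struct layout surfaces as a link error. A standalone C11 sketch of the same check (the struct is a stand-in mirroring the tsb_config added in this patch; offsets assume the 64-bit sparc64 ABI with 8-byte longs and pointers):

/* Sketch: the same layout check with _Static_assert.  The kernel of
 * this era predates such helpers, hence the undefined-function trick.
 */
#include <stddef.h>

struct tsb_config_demo {		/* mirrors struct tsb_config */
	void *tsb;
	unsigned long tsb_rss_limit;
	unsigned long tsb_nentries;
	unsigned long tsb_reg_val;
	unsigned long tsb_map_vaddr;
	unsigned long tsb_map_pte;
};

_Static_assert(offsetof(struct tsb_config_demo, tsb_reg_val) == 0x18,
	       "TSB_CONFIG_REG_VAL out of sync with asm offset");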
...
@@ -3,8 +3,13 @@
  * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
  */
 
+#include <linux/config.h>
 #include <asm/tsb.h>
 #include <asm/hypervisor.h>
+#include <asm/page.h>
+#include <asm/cpudata.h>
+#include <asm/mmu.h>
 
 	.text
 	.align	32
@@ -34,34 +39,124 @@ tsb_miss_itlb:
 	 ldxa		[%g4] ASI_IMMU, %g4
 
 	/* At this point we have:
-	 * %g1 --	TSB entry address
+	 * %g1 --	PAGE_SIZE TSB entry address
 	 * %g3 --	FAULT_CODE_{D,I}TLB
 	 * %g4 --	missing virtual address
 	 * %g6 --	TAG TARGET (vaddr >> 22)
 	 */
 tsb_miss_page_table_walk:
-	TRAP_LOAD_PGD_PHYS(%g7, %g5)
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g5)
 
-	/* And now we have the PGD base physical address in %g7.  */
-tsb_miss_page_table_walk_sun4v_fastpath:
-	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
+	/* Before committing to a full page table walk,
+	 * check the huge page TSB.
+	 */
+#ifdef CONFIG_HUGETLB_PAGE
+
+661:	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
+	nop
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	mov		SCRATCHPAD_UTSBREG2, %g5
+	ldxa		[%g5] ASI_SCRATCHPAD, %g5
+	.previous
+
+	cmp		%g5, -1
+	be,pt		%xcc, 80f
+	 nop
+
+	/* We need an aligned pair of registers containing 2 values
+	 * which can be easily rematerialized.  %g6 and %g7 foot the
+	 * bill just nicely.  We'll save %g6 away into %g2 for the
+	 * huge page TSB TAG comparison.
+	 *
+	 * Perform a huge page TSB lookup.
+	 */
+	mov		%g6, %g2
+	and		%g5, 0x7, %g6
+	mov		512, %g7
+	andn		%g5, 0x7, %g5
+	sllx		%g7, %g6, %g7
+	srlx		%g4, HPAGE_SHIFT, %g6
+	sub		%g7, 1, %g7
+	and		%g6, %g7, %g6
+	sllx		%g6, 4, %g6
+	add		%g5, %g6, %g5
+
+	TSB_LOAD_QUAD(%g5, %g6)
+	cmp		%g6, %g2
+	be,a,pt		%xcc, tsb_tlb_reload
+	 mov		%g7, %g5
+
+	/* No match, remember the huge page TSB entry address,
+	 * and restore %g6 and %g7.
+	 */
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
+	srlx		%g4, 22, %g6
+80:	stx		%g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+#endif
+
+	ldx		[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7
 
 	/* At this point we have:
 	 * %g1 --	TSB entry address
 	 * %g3 --	FAULT_CODE_{D,I}TLB
-	 * %g5 --	physical address of PTE in Linux page tables
+	 * %g4 --	missing virtual address
 	 * %g6 --	TAG TARGET (vaddr >> 22)
+	 * %g7 --	page table physical address
+	 *
+	 * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE
+	 * TSB both lack a matching entry.
 	 */
-tsb_reload:
-	TSB_LOCK_TAG(%g1, %g2, %g7)
+tsb_miss_page_table_walk_sun4v_fastpath:
+	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
-	mov		1, %g7
-	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
-	brgez,a,pn	%g5, tsb_do_fault
-	 TSB_STORE(%g1, %g7)
+	brgez,pn	%g5, tsb_do_fault
+	 nop
+
+#ifdef CONFIG_HUGETLB_PAGE
+661:	sethi		%uhi(_PAGE_SZALL_4U), %g7
+	sllx		%g7, 32, %g7
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	mov		_PAGE_SZALL_4V, %g7
+	nop
+	.previous
+
+	and		%g5, %g7, %g2
+
+661:	sethi		%uhi(_PAGE_SZHUGE_4U), %g7
+	sllx		%g7, 32, %g7
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	mov		_PAGE_SZHUGE_4V, %g7
+	nop
+	.previous
+
+	cmp		%g2, %g7
+	bne,pt		%xcc, 60f
+	 nop
+
+	/* It is a huge page, use huge page TSB entry address we
+	 * calculated above.
+	 */
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
+	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
+	cmp		%g2, -1
+	movne		%xcc, %g2, %g1
+60:
+#endif
+
+	/* At this point we have:
+	 * %g1 --	TSB entry address
+	 * %g3 --	FAULT_CODE_{D,I}TLB
+	 * %g5 --	valid PTE
+	 * %g6 --	TAG TARGET (vaddr >> 22)
+	 */
+tsb_reload:
+	TSB_LOCK_TAG(%g1, %g2, %g7)
 	TSB_WRITE(%g1, %g5, %g6)
 
 	/* Finally, load TLB and return from trap.  */
@@ -240,10 +335,9 @@ tsb_flush:
 	 * schedule() time.
 	 *
 	 * %o0: page table physical address
-	 * %o1:	TSB register value
-	 * %o2:	TSB virtual address
-	 * %o3:	TSB mapping locked PTE
-	 * %o4:	Hypervisor TSB descriptor physical address
+	 * %o1:	TSB base config pointer
+	 * %o2:	TSB huge config pointer, or NULL if none
+	 * %o3:	Hypervisor TSB descriptor physical address
 	 *
 	 * We have to run this whole thing with interrupts
 	 * disabled so that the current cpu doesn't change
@@ -253,63 +347,79 @@ tsb_flush:
 	.globl	__tsb_context_switch
 	.type	__tsb_context_switch,#function
 __tsb_context_switch:
-	rdpr	%pstate, %o5
-	wrpr	%o5, PSTATE_IE, %pstate
+	rdpr	%pstate, %g1
+	wrpr	%g1, PSTATE_IE, %pstate
+
+	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
 
-	ldub	[%g6 + TI_CPU], %g1
-	sethi	%hi(trap_block), %g2
-	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1
-	or	%g2, %lo(trap_block), %g2
-	add	%g2, %g1, %g2
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
 
-	sethi	%hi(tlb_type), %g1
-	lduw	[%g1 + %lo(tlb_type)], %g1
-	cmp	%g1, 3
-	bne,pt	%icc, 1f
+	ldx	[%o1 + TSB_CONFIG_REG_VAL], %o0
+	brz,pt	%o2, 1f
+	 mov	-1, %g3
+
+	ldx	[%o2 + TSB_CONFIG_REG_VAL], %g3
+
+1:	stx	%g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]
+
+	sethi	%hi(tlb_type), %g2
+	lduw	[%g2 + %lo(tlb_type)], %g2
+	cmp	%g2, 3
+	bne,pt	%icc, 50f
 	 nop
 
 	/* Hypervisor TSB switch. */
-	mov	SCRATCHPAD_UTSBREG1, %g1
-	stxa	%o1, [%g1] ASI_SCRATCHPAD
-	mov	-1, %g2
-	mov	SCRATCHPAD_UTSBREG2, %g1
-	stxa	%g2, [%g1] ASI_SCRATCHPAD
+	mov	SCRATCHPAD_UTSBREG1, %o5
+	stxa	%o0, [%o5] ASI_SCRATCHPAD
+	mov	SCRATCHPAD_UTSBREG2, %o5
+	stxa	%g3, [%o5] ASI_SCRATCHPAD
 
-	/* Save away %o5's %pstate, we have to use %o5 for
-	 * the hypervisor call.
-	 */
-	mov	%o5, %g1
+	mov	2, %o0
+	cmp	%g3, -1
+	move	%xcc, 1, %o0
 
 	mov	HV_FAST_MMU_TSB_CTXNON0, %o5
-	mov	1, %o0
-	mov	%o4, %o1
+	mov	%o3, %o1
 	ta	HV_FAST_TRAP
 
-	/* Finish up and restore %o5.  */
+	/* Finish up.  */
 	ba,pt	%xcc, 9f
-	 mov	%g1, %o5
+	 nop
 
 	/* SUN4U TSB switch.  */
-1:	mov	TSB_REG, %g1
-	stxa	%o1, [%g1] ASI_DMMU
+50:	mov	TSB_REG, %o5
+	stxa	%o0, [%o5] ASI_DMMU
 	membar	#Sync
-	stxa	%o1, [%g1] ASI_IMMU
+	stxa	%o0, [%o5] ASI_IMMU
 	membar	#Sync
 
-2:	brz	%o2, 9f
-	 nop
+2:	ldx	[%o1 + TSB_CONFIG_MAP_VADDR], %o4
+	brz	%o4, 9f
+	 ldx	[%o1 + TSB_CONFIG_MAP_PTE], %o5
 
 	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
-	mov	TLB_TAG_ACCESS, %g1
+	mov	TLB_TAG_ACCESS, %g3
 	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
-	stxa	%o2, [%g1] ASI_DMMU
+	stxa	%o4, [%g3] ASI_DMMU
 	membar	#Sync
 	sllx	%g2, 3, %g2
-	stxa	%o3, [%g2] ASI_DTLB_DATA_ACCESS
+	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
+
+	brz,pt	%o2, 9f
+	 nop
+
+	ldx	[%o2 + TSB_CONFIG_MAP_VADDR], %o4
+	ldx	[%o2 + TSB_CONFIG_MAP_PTE], %o5
+	mov	TLB_TAG_ACCESS, %g3
+	stxa	%o4, [%g3] ASI_DMMU
+	membar	#Sync
+	sub	%g2, (1 << 3), %g2
+	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
 	membar	#Sync
 
 9:
-	wrpr	%o5, %pstate
+	wrpr	%g1, %pstate
 
 	retl
 	 nop
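Rewritten in rough C, the new calling convention amounts to the following (a sketch under the struct layouts added by this patch; the real routine is the assembly above, which additionally programs TSB_REG, the sun4v scratchpad registers, and the locked TLB entries):

/* Sketch of the new __tsb_context_switch contract: callers pass
 * tsb_config pointers, and -1 stored in the per-cpu tsb_huge slot
 * tells the TLB miss handlers there is no huge page TSB.
 */
struct tsb_config_sk { unsigned long tsb_reg_val; };
struct trap_per_cpu_sk { unsigned long pgd_paddr, tsb_huge; };

static unsigned long tsb_context_switch_sk(struct trap_per_cpu_sk *tb,
					   unsigned long pgd_pa,
					   struct tsb_config_sk *base,
					   struct tsb_config_sk *huge)
{
	tb->pgd_paddr = pgd_pa;
	tb->tsb_huge = huge ? huge->tsb_reg_val : -1UL;

	/* The returned value is what the assembly writes into TSB_REG
	 * (sun4u) or SCRATCHPAD_UTSBREG1 (sun4v).
	 */
	return base->tsb_reg_val;
}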
...
@@ -410,9 +410,18 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	up_read(&mm->mmap_sem);
 
 	mm_rss = get_mm_rss(mm);
-	if (unlikely(mm_rss >= mm->context.tsb_rss_limit))
-		tsb_grow(mm, mm_rss);
+#ifdef CONFIG_HUGETLB_PAGE
+	mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
+#endif
+	if (unlikely(mm_rss >=
+		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
+		tsb_grow(mm, MM_TSB_BASE, mm_rss);
+#ifdef CONFIG_HUGETLB_PAGE
+	mm_rss = mm->context.huge_pte_count;
+	if (unlikely(mm_rss >=
+		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
+		tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+#endif
 	return;
 
 	/*
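Worked example of the split accounting: get_mm_rss() counts huge mappings in base page units, so each huge PTE contributes HPAGE_SIZE / PAGE_SIZE base pages that must not inflate the base TSB's grow decision, while the huge TSB is sized from huge_pte_count alone. A standalone sketch with the default 8K base / 4MB huge sizes:

/* Sketch of the RSS split introduced above: 16 huge PTEs account for
 * 16 * 512 = 8192 base pages, leaving 1808 for the base TSB check.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 8192, hpage_size = 4UL << 20;
	unsigned long mm_rss = 10000;		/* total, in base pages */
	unsigned long huge_pte_count = 16;

	unsigned long base_rss = mm_rss -
		huge_pte_count * (hpage_size / page_size);

	printf("base TSB sized from %lu, huge TSB from %lu\n",
	       base_rss, huge_pte_count);
	return 0;
}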
...
@@ -199,13 +199,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, addr);
-	if (pgd) {
-		pud = pud_offset(pgd, addr);
-		if (pud) {
-			pmd = pmd_alloc(mm, pud, addr);
-			if (pmd)
-				pte = pte_alloc_map(mm, pmd, addr);
-		}
+	pud = pud_alloc(mm, pgd, addr);
+	if (pud) {
+		pmd = pmd_alloc(mm, pud, addr);
+		if (pmd)
+			pte = pte_alloc_map(mm, pmd, addr);
 	}
 	return pte;
 }
@@ -231,13 +229,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
-#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
-
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
 	int i;
 
+	if (!pte_present(*ptep) && pte_present(entry))
+		mm->context.huge_pte_count++;
+
 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
 		set_pte_at(mm, addr, ptep, entry);
 		ptep++;
@@ -253,6 +252,8 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	int i;
 
 	entry = *ptep;
+	if (pte_present(entry))
+		mm->context.huge_pte_count--;
 
 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
 		pte_clear(mm, addr, ptep);
@@ -302,6 +303,15 @@ static void context_reload(void *__data)
 
 void hugetlb_prefault_arch_hook(struct mm_struct *mm)
 {
+	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+
+	if (likely(tp->tsb != NULL))
+		return;
+
+	tsb_grow(mm, MM_TSB_HUGE, 0);
+	tsb_context_switch(mm);
+	smp_tsb_sync(mm);
+
 	/* On UltraSPARC-III+ and later, configure the second half of
 	 * the Data-TLB for huge pages.
 	 */
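The counter driving all of this is maintained by the two hunks above: set_huge_pte_at() increments huge_pte_count only on a not-present to present transition, and huge_ptep_get_and_clear() decrements only when a present PTE is cleared, so the counter always equals the number of live huge mappings. A standalone sketch of that invariant:

/* Sketch of the huge_pte_count invariant: one count per live huge
 * mapping, changed only on present <-> not-present transitions.
 */
#include <assert.h>
#include <stdbool.h>

static unsigned long huge_pte_count;

static void set_huge(bool old_present, bool new_present)
{
	if (!old_present && new_present)
		huge_pte_count++;
}

static void clear_huge(bool old_present)
{
	if (old_present)
		huge_pte_count--;
}

int main(void)
{
	set_huge(false, true);	/* first mapping */
	set_huge(true, true);	/* overwrite in place: no change */
	clear_huge(true);	/* unmap */
	assert(huge_pte_count == 0);
	return 0;
}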
...
@@ -283,6 +283,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 	struct mm_struct *mm;
 	struct tsb *tsb;
 	unsigned long tag, flags;
+	unsigned long tsb_index, tsb_hash_shift;
 
 	if (tlb_type != hypervisor) {
 		unsigned long pfn = pte_pfn(pte);
@@ -312,10 +313,26 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 
 	mm = vma->vm_mm;
 
+	tsb_index = MM_TSB_BASE;
+	tsb_hash_shift = PAGE_SHIFT;
+
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
-			       (mm->context.tsb_nentries - 1UL)];
+#ifdef CONFIG_HUGETLB_PAGE
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
+		if ((tlb_type == hypervisor &&
+		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+		    (tlb_type != hypervisor &&
+		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
+			tsb_index = MM_TSB_HUGE;
+			tsb_hash_shift = HPAGE_SHIFT;
+		}
+	}
+#endif
+
+	tsb = mm->context.tsb_block[tsb_index].tsb;
+	tsb += ((address >> tsb_hash_shift) &
+		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
 	tag = (address >> 22UL);
 	tsb_insert(tsb, tag, pte_val(pte));
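Worked example of the two hash shifts used above: the same faulting address indexes the base and huge TSBs at different slots, while the tag (address >> 22) is common to both. A standalone sketch with 8K base pages (shift 13), 4MB huge pages (shift 22) and 512-entry TSBs:

/* Sketch: where one address lands in each TSB.
 * Prints: base slot 282, huge slot 4, tag 1c0004.
 */
#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x70001234000UL;
	unsigned long nentries = 512;

	unsigned long base_slot = (addr >> 13) & (nentries - 1);
	unsigned long huge_slot = (addr >> 22) & (nentries - 1);

	printf("base slot %lu, huge slot %lu, tag %lx\n",
	       base_slot, huge_slot, addr >> 22);
	return 0;
}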
...
@@ -15,9 +15,9 @@
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
-static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
+static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
 {
-	vaddr >>= PAGE_SHIFT;
+	vaddr >>= hash_shift;
 	return vaddr & (nentries - 1);
 }
@@ -36,7 +36,8 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	unsigned long v;
 
 	for (v = start; v < end; v += PAGE_SIZE) {
-		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
+		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
+					      KERNEL_TSB_NENTRIES);
 		struct tsb *ent = &swapper_tsb[hash];
 
 		if (tag_compare(ent->tag, v)) {
@@ -46,49 +47,91 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	}
 }
 
-void flush_tsb_user(struct mmu_gather *mp)
+static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
 {
-	struct mm_struct *mm = mp->mm;
-	unsigned long nentries, base, flags;
-	struct tsb *tsb;
-	int i;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-
-	tsb = mm->context.tsb;
-	nentries = mm->context.tsb_nentries;
-
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		base = __pa(tsb);
-	else
-		base = (unsigned long) tsb;
+	unsigned long i;
 
 	for (i = 0; i < mp->tlb_nr; i++) {
 		unsigned long v = mp->vaddrs[i];
 		unsigned long tag, ent, hash;
 
 		v &= ~0x1UL;
 
-		hash = tsb_hash(v, nentries);
-		ent = base + (hash * sizeof(struct tsb));
+		hash = tsb_hash(v, hash_shift, nentries);
+		ent = tsb + (hash * sizeof(struct tsb));
 		tag = (v >> 22UL);
 
 		tsb_flush(ent, tag);
 	}
+}
+
+void flush_tsb_user(struct mmu_gather *mp)
+{
+	struct mm_struct *mm = mp->mm;
+	unsigned long nentries, base, flags;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+		base = __pa(base);
+	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);
+
+#ifdef CONFIG_HUGETLB_PAGE
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
+	}
+#endif
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
-static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
+#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_512K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_512K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_4MB
+#else
+#error Broken base page size setting...
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
+#else
+#error Broken huge page size setting...
+#endif
+#endif
+
+static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
 {
 	unsigned long tsb_reg, base, tsb_paddr;
 	unsigned long page_sz, tte;
 
-	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);
+	mm->context.tsb_block[tsb_idx].tsb_nentries =
+		tsb_bytes / sizeof(struct tsb);
 
 	base = TSBMAP_BASE;
 	tte = pgprot_val(PAGE_KERNEL_LOCKED);
-	tsb_paddr = __pa(mm->context.tsb);
+	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
 	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
 
 	/* Use the smallest page size that can map the whole TSB
@@ -147,61 +190,49 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
 
 		/* Physical mapping, no locked TLB entry for TSB.  */
 		tsb_reg |= tsb_paddr;
 
-		mm->context.tsb_reg_val = tsb_reg;
-		mm->context.tsb_map_vaddr = 0;
-		mm->context.tsb_map_pte = 0;
+		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
+		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
 	} else {
 		tsb_reg |= base;
 		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
 		tte |= (tsb_paddr & ~(page_sz - 1UL));
 
-		mm->context.tsb_reg_val = tsb_reg;
-		mm->context.tsb_map_vaddr = base;
-		mm->context.tsb_map_pte = tte;
+		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
+		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
 	}
 
 	/* Setup the Hypervisor TSB descriptor.  */
 	if (tlb_type == hypervisor) {
-		struct hv_tsb_descr *hp = &mm->context.tsb_descr;
+		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];
 
-		switch (PAGE_SIZE) {
-		case 8192:
-		default:
-			hp->pgsz_idx = HV_PGSZ_IDX_8K;
-			break;
-
-		case 64 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_64K;
-			break;
-
-		case 512 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_512K;
-			break;
-
-		case 4 * 1024 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
+		switch (tsb_idx) {
+		case MM_TSB_BASE:
+			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
 			break;
+#ifdef CONFIG_HUGETLB_PAGE
+		case MM_TSB_HUGE:
+			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
+			break;
+#endif
+		default:
+			BUG();
 		};
 		hp->assoc = 1;
 		hp->num_ttes = tsb_bytes / 16;
 		hp->ctx_idx = 0;
-		switch (PAGE_SIZE) {
-		case 8192:
-		default:
-			hp->pgsz_mask = HV_PGSZ_MASK_8K;
-			break;
-
-		case 64 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_64K;
-			break;
-
-		case 512 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_512K;
-			break;
-
-		case 4 * 1024 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
+		switch (tsb_idx) {
+		case MM_TSB_BASE:
+			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
+			break;
+#ifdef CONFIG_HUGETLB_PAGE
+		case MM_TSB_HUGE:
+			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
 			break;
+#endif
+		default:
+			BUG();
 		};
 		hp->tsb_base = tsb_paddr;
 		hp->resv = 0;
@@ -241,11 +272,11 @@ void __init tsb_cache_init(void)
 	}
 }
 
-/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
- * do_sparc64_fault() invokes this routine to try and grow the TSB.
+/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
+ * do_sparc64_fault() invokes this routine to try and grow it.
  *
  * When we reach the maximum TSB size supported, we stick ~0UL into
- * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
+ * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
  * will not trigger any longer.
  *
  * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
@@ -257,7 +288,7 @@ void __init tsb_cache_init(void)
  * the number of entries that the current TSB can hold at once.  Currently,
  * we trigger when the RSS hits 3/4 of the TSB capacity.
  */
-void tsb_grow(struct mm_struct *mm, unsigned long rss)
+void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 {
 	unsigned long max_tsb_size = 1 * 1024 * 1024;
 	unsigned long new_size, old_size, flags;
@@ -297,7 +328,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 	 * down to a 0-order allocation and force no TSB
 	 * growing for this address space.
 	 */
-	if (mm->context.tsb == NULL && new_cache_index > 0) {
+	if (mm->context.tsb_block[tsb_index].tsb == NULL &&
+	    new_cache_index > 0) {
 		new_cache_index = 0;
 		new_size = 8192;
 		new_rss_limit = ~0UL;
@@ -307,8 +339,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 		/* If we failed on a TSB grow, we are under serious
 		 * memory pressure so don't try to grow any more.
 		 */
-		if (mm->context.tsb != NULL)
-			mm->context.tsb_rss_limit = ~0UL;
+		if (mm->context.tsb_block[tsb_index].tsb != NULL)
+			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
 		return;
 	}
@@ -339,23 +371,26 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 	 */
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	old_tsb = mm->context.tsb;
-	old_cache_index = (mm->context.tsb_reg_val & 0x7UL);
-	old_size = mm->context.tsb_nentries * sizeof(struct tsb);
+	old_tsb = mm->context.tsb_block[tsb_index].tsb;
+	old_cache_index =
+		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
+	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
+		    sizeof(struct tsb));
 
 	/* Handle multiple threads trying to grow the TSB at the same time.
 	 * One will get in here first, and bump the size and the RSS limit.
 	 * The others will get in here next and hit this check.
 	 */
-	if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
+	if (unlikely(old_tsb &&
+		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
 		spin_unlock_irqrestore(&mm->context.lock, flags);
 
 		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
 		return;
 	}
 
-	mm->context.tsb_rss_limit = new_rss_limit;
+	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;
 
 	if (old_tsb) {
 		extern void copy_tsb(unsigned long old_tsb_base,
@@ -372,8 +407,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
 	}
 
-	mm->context.tsb = new_tsb;
-	setup_tsb_params(mm, new_size);
+	mm->context.tsb_block[tsb_index].tsb = new_tsb;
+	setup_tsb_params(mm, tsb_index, new_size);
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
@@ -394,40 +429,65 @@ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+#ifdef CONFIG_HUGETLB_PAGE
+	unsigned long huge_pte_count;
+#endif
+	unsigned int i;
+
 	spin_lock_init(&mm->context.lock);
 
 	mm->context.sparc64_ctx_val = 0UL;
 
+#ifdef CONFIG_HUGETLB_PAGE
+	/* We reset it to zero because the fork() page copying
+	 * will re-increment the counters as the parent PTEs are
+	 * copied into the child address space.
+	 */
+	huge_pte_count = mm->context.huge_pte_count;
+	mm->context.huge_pte_count = 0;
+#endif
+
 	/* copy_mm() copies over the parent's mm_struct before calling
 	 * us, so we need to zero out the TSB pointer or else tsb_grow()
 	 * will be confused and think there is an older TSB to free up.
 	 */
-	mm->context.tsb = NULL;
+	for (i = 0; i < MM_NUM_TSBS; i++)
+		mm->context.tsb_block[i].tsb = NULL;
 
 	/* If this is fork, inherit the parent's TSB size.  We would
 	 * grow it to that size on the first page fault anyways.
 	 */
-	tsb_grow(mm, get_mm_rss(mm));
+	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
 
-	if (unlikely(!mm->context.tsb))
+#ifdef CONFIG_HUGETLB_PAGE
+	if (unlikely(huge_pte_count))
+		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
+#endif
+
+	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
 		return -ENOMEM;
 
 	return 0;
 }
 
-void destroy_context(struct mm_struct *mm)
+static void tsb_destroy_one(struct tsb_config *tp)
 {
-	unsigned long flags, cache_index;
+	unsigned long cache_index;
 
-	cache_index = (mm->context.tsb_reg_val & 0x7UL);
-	kmem_cache_free(tsb_caches[cache_index], mm->context.tsb);
+	if (!tp->tsb)
+		return;
+	cache_index = tp->tsb_reg_val & 0x7UL;
+	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
+	tp->tsb = NULL;
+	tp->tsb_reg_val = 0UL;
+}
 
-	/* We can remove these later, but for now it's useful
-	 * to catch any bogus post-destroy_context() references
-	 * to the TSB.
-	 */
-	mm->context.tsb = NULL;
-	mm->context.tsb_reg_val = 0UL;
+void destroy_context(struct mm_struct *mm)
+{
+	unsigned long flags, i;
+
+	for (i = 0; i < MM_NUM_TSBS; i++)
+		tsb_destroy_one(&mm->context.tsb_block[i]);
 
 	spin_lock_irqsave(&ctx_alloc_lock, flags);
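Worked example of the growth policy described in the comments above: entries are 16 bytes, the grow trigger is 3/4 of capacity, and sizes double from 8K up to the 1MB cap (where the limit is pinned to ~0UL so growth stops). A standalone sketch approximating the size selection in tsb_grow():

/* Sketch: pick the smallest TSB whose 3/4 capacity exceeds rss.
 * For rss = 40000 this prints the 1MB TSB: 65536 entries, limit 49152.
 */
#include <stdio.h>

int main(void)
{
	unsigned long size, max_tsb_size = 1024 * 1024;
	unsigned long rss = 40000;

	for (size = 8192; size <= max_tsb_size; size <<= 1) {
		unsigned long nentries = size / 16;
		unsigned long rss_limit = (nentries * 3) / 4;

		if (rss < rss_limit || size == max_tsb_size) {
			printf("size %lu, %lu entries, limit %lu\n",
			       size, nentries, rss_limit);
			break;
		}
	}
	return 0;
}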
...
@@ -71,7 +71,8 @@ struct trap_per_cpu {
 /* Dcache line 7: Physical addresses of CPU send mondo block and CPU list.  */
 	unsigned long		cpu_mondo_block_pa;
 	unsigned long		cpu_list_pa;
-	unsigned long		__pad1[2];
+	unsigned long		tsb_huge;
+	unsigned long		tsb_huge_temp;
 
 /* Dcache line 8: Unused, needed to keep trap_block a power-of-2 in size.  */
 	unsigned long		__pad2[4];
@@ -116,6 +117,8 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 #define TRAP_PER_CPU_FAULT_INFO		0x40
 #define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA	0xc0
 #define TRAP_PER_CPU_CPU_LIST_PA	0xc8
+#define TRAP_PER_CPU_TSB_HUGE		0xd0
+#define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
 
 #define TRAP_BLOCK_SZ_SHIFT		8
...
@@ -90,18 +90,39 @@ extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte
 extern void tsb_flush(unsigned long ent, unsigned long tag);
 extern void tsb_init(struct tsb *tsb, unsigned long size);
 
-typedef struct {
-	spinlock_t		lock;
-	unsigned long		sparc64_ctx_val;
+struct tsb_config {
 	struct tsb		*tsb;
 	unsigned long		tsb_rss_limit;
 	unsigned long		tsb_nentries;
 	unsigned long		tsb_reg_val;
 	unsigned long		tsb_map_vaddr;
 	unsigned long		tsb_map_pte;
-	struct hv_tsb_descr	tsb_descr;
+};
+
+#define MM_TSB_BASE	0
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define MM_TSB_HUGE	1
+#define MM_NUM_TSBS	2
+#else
+#define MM_NUM_TSBS	1
+#endif
+
+typedef struct {
+	spinlock_t		lock;
+	unsigned long		sparc64_ctx_val;
+	unsigned long		huge_pte_count;
+	struct tsb_config	tsb_block[MM_NUM_TSBS];
+	struct hv_tsb_descr	tsb_descr[MM_NUM_TSBS];
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
 
+#define TSB_CONFIG_TSB		0x00
+#define TSB_CONFIG_RSS_LIMIT	0x08
+#define TSB_CONFIG_NENTRIES	0x10
+#define TSB_CONFIG_REG_VAL	0x18
+#define TSB_CONFIG_MAP_VADDR	0x20
+#define TSB_CONFIG_MAP_PTE	0x28
+
 #endif /* __MMU_H */
@@ -29,20 +29,25 @@ extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
 
 extern void __tsb_context_switch(unsigned long pgd_pa,
-				 unsigned long tsb_reg,
-				 unsigned long tsb_vaddr,
-				 unsigned long tsb_pte,
+				 struct tsb_config *tsb_base,
+				 struct tsb_config *tsb_huge,
 				 unsigned long tsb_descr_pa);
 
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
-	__tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val,
-			     mm->context.tsb_map_vaddr,
-			     mm->context.tsb_map_pte,
-			     __pa(&mm->context.tsb_descr));
+	__tsb_context_switch(__pa(mm->pgd),
+			     &mm->context.tsb_block[0],
+#ifdef CONFIG_HUGETLB_PAGE
+			     (mm->context.tsb_block[1].tsb ?
+			      &mm->context.tsb_block[1] :
+			      NULL)
+#else
+			     NULL
+#endif
+			     , __pa(&mm->context.tsb_descr[0]));
 }
 
-extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss);
+extern void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss);
 #ifdef CONFIG_SMP
 extern void smp_tsb_sync(struct mm_struct *mm);
 #else
...
@@ -30,6 +30,23 @@
 
 #ifdef __KERNEL__
 
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define HPAGE_SHIFT		22
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define HPAGE_SHIFT		19
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define HPAGE_SHIFT		16
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
+#define HPAGE_MASK		(~(HPAGE_SIZE - 1UL))
+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
+#define ARCH_HAS_HUGETLB_PREFAULT_HOOK
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+
 #ifndef __ASSEMBLY__
 
 extern void _clear_page(void *page);
@@ -90,23 +107,6 @@ typedef unsigned long pgprot_t;
 
 #endif /* (STRICT_MM_TYPECHECKS) */
 
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
-#define HPAGE_SHIFT		22
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-#define HPAGE_SHIFT		19
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define HPAGE_SHIFT		16
-#endif
-
-#ifdef CONFIG_HUGETLB_PAGE
-#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
-#define HPAGE_MASK		(~(HPAGE_SIZE - 1UL))
-#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
-#define ARCH_HAS_SETCLEAR_HUGE_PTE
-#define ARCH_HAS_HUGETLB_PREFAULT_HOOK
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#endif
-
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_32BIT) ? \
 				 (_AC(0x0000000070000000,UL)) : \
 				 (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
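The HPAGE_* block is moved above the #ifndef __ASSEMBLY__ guard so that assembly code (the TLB miss handlers, which now use HPAGE_SHIFT) can see it too. Worked arithmetic for the default configuration, 8K base pages (PAGE_SHIFT 13) with 4MB huge pages:

/* Worked example of the HPAGE_* arithmetic: a 4MB huge page spans
 * 1 << (22 - 13) = 512 base PTEs, which is exactly the loop bound
 * (1 << HUGETLB_PAGE_ORDER) in set_huge_pte_at() above.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 13, hpage_shift = 22;
	unsigned long hpage_size = 1UL << hpage_shift;
	unsigned long order = hpage_shift - page_shift;

	printf("HPAGE_SIZE=%lu order=%lu ptes/hugepage=%lu\n",
	       hpage_size, order, 1UL << order);
	return 0;
}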
...
@@ -105,6 +105,7 @@
 #define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved             */
 #define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
 #define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
+#define _PAGE_SZALL_4U	  _AC(0x6001000000000000,UL) /* All pgsz bits        */
 #define _PAGE_SN_4U	  _AC(0x0000800000000000,UL) /* (Cheetah) Snoop      */
 #define _PAGE_RES2_4U	  _AC(0x0000780000000000,UL) /* Reserved             */
 #define _PAGE_PADDR_4U	  _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]  */
@@ -150,6 +151,7 @@
 #define _PAGE_SZ512K_4V	  _AC(0x0000000000000002,UL) /* 512K Page            */
 #define _PAGE_SZ64K_4V	  _AC(0x0000000000000001,UL) /* 64K Page             */
 #define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL) /* 8K Page              */
+#define _PAGE_SZALL_4V	  _AC(0x0000000000000007,UL) /* All pgsz bits        */
 
 #if PAGE_SHIFT == 13
 #define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
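The new _PAGE_SZALL_* constants mask out every page size bit, enabling the classify-by-size test used by tsb.S and update_mmu_cache(): mask the PTE and compare against the huge page encoding. A standalone sketch for sun4v, where the size field is the low three bits and, per the _PAGE_SZ*_4V values above (512K = 2, 64K = 1, 8K = 0), the 4MB encoding is 3:

/* Sketch of the huge-PTE test enabled by _PAGE_SZALL_4V. */
#include <assert.h>

int main(void)
{
	unsigned long szall_4v = 0x7UL;		/* _PAGE_SZALL_4V */
	unsigned long szhuge_4v = 0x3UL;	/* 4MB size field encoding */
	unsigned long pte = 0x80000000123f4b83UL;	/* made-up PTE */

	assert((pte & szall_4v) == szhuge_4v);	/* size field says 4MB */
	return 0;
}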
...