Commit c4bce90e authored by David S. Miller

[SPARC64]: Deal with PTE layout differences in SUN4V.

Yes, you heard it right, they changed the PTE layout for
SUN4V.  Ho hum...

This is the simple and inefficient way to support this.
It'll get optimized, don't worry.
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 490384e7
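
The approach, visible throughout the diff: every PTE constant the old code baked in at compile time either becomes a variable filled in at boot by sun4u_pgprot_init()/sun4v_pgprot_init(), or an out-of-line helper that tests tlb_type on every call. A minimal C sketch of the shape (pte_write() and the _4U/_4V masks are all defined later in this patch):

	/* PTE bit queries stop being compile-time masks and instead
	 * dispatch on the CPU type detected at boot.
	 */
	unsigned long pte_write(pte_t pte)
	{
		return (pte_val(pte) &
			((tlb_type == hypervisor) ?
			 _PAGE_WRITE_4V : _PAGE_WRITE_4U));
	}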
arch/sparc64/kernel/itlb_miss.S
@@ -6,9 +6,10 @@
	 nop					! Delay slot (fill me)
	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
	cmp	%g4, %g6			! Compare TAG
-	sethi	%hi(_PAGE_EXEC), %g4		! Setup exec check
+	sethi	%hi(PAGE_EXEC), %g4		! Setup exec check

/* ITLB ** ICACHE line 2: TSB compare and TLB load	*/
+	ldx	[%g4 + %lo(PAGE_EXEC)], %g4
	bne,pn	%xcc, tsb_miss_itlb		! Miss
	 mov	FAULT_CODE_ITLB, %g3
	andcc	%g5, %g4, %g0			! Executable?
@@ -16,7 +17,6 @@
	nop					! Delay slot, fill me
	stxa	%g5, [%g0] ASI_ITLB_DATA_IN	! Load TLB
	retry					! Trap done
-	nop

/* ITLB ** ICACHE line 3:				*/
	nop
......
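PAGE_EXEC is no longer a compile-time constant here, so the handler cannot build the mask out of sethi/or immediates; the sethi %hi()/ldx [%lo()] pair instead fetches the 64-bit value that sun4{u,v}_pgprot_init() stored at boot (the sun4v_tlb_miss.S hunk below makes the same change). Roughly, in C:

	/* Rough C equivalent of the exec check above: the mask comes
	 * from the boot-initialized PAGE_EXEC pgprot rather than from
	 * an immediate.
	 */
	int tte_executable(unsigned long tte)
	{
		return (tte & pgprot_val(PAGE_EXEC)) != 0;
	}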
arch/sparc64/kernel/ktlb.S
@@ -131,16 +131,8 @@ kvmap_dtlb_4v:
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop

-#define KERN_HIGHBITS	((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
-#define KERN_LOWBITS	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
-
-	sethi		%uhi(KERN_HIGHBITS), %g2
-	or		%g2, %ulo(KERN_HIGHBITS), %g2
-	sllx		%g2, 32, %g2
-	or		%g2, KERN_LOWBITS, %g2
-
-#undef KERN_HIGHBITS
-#undef KERN_LOWBITS
+	sethi		%hi(kern_linear_pte_xor), %g2
+	ldx		[%g2 + %lo(kern_linear_pte_xor)], %g2

	.globl		kvmap_linear_patch
kvmap_linear_patch:
......
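The removed KERN_HIGHBITS/KERN_LOWBITS constant and the new boot-computed kern_linear_pte_xor encode the same trick: a linear-mapping virtual address is PAGE_OFFSET | paddr, and the constant is the PTE attribute bits XORed with PAGE_OFFSET, so a single XOR both strips the PAGE_OFFSET bits and ORs in the attributes. A sketch, assuming the offset bits below the 4MB page size have already been masked off as the surrounding handler arranges:

	/* vaddr ^ kern_linear_pte_xor == paddr | pte_attribute_bits,
	 * because kern_linear_pte_xor == attr_bits ^ 0xfffff80000000000
	 * (the PAGE_OFFSET value from the removed KERN_HIGHBITS define).
	 */
	static unsigned long linear_tte(unsigned long vaddr)
	{
		return vaddr ^ kern_linear_pte_xor;
	}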
arch/sparc64/kernel/setup.c
@@ -64,12 +64,6 @@ struct screen_info screen_info = {
	16                      /* orig-video-points */
};

-/* Typing sync at the prom prompt calls the function pointed to by
- * the sync callback which I set to the following function.
- * This should sync all filesystems and return, for now it just
- * prints out pretty messages and returns.
- */
-
void (*prom_palette)(int);
void (*prom_keyboard)(void);
@@ -79,263 +73,6 @@ prom_console_write(struct console *con, const char *s, unsigned n)
	prom_write(s, n);
}
static struct console prom_console = {
.name = "prom",
.write = prom_console_write,
.flags = CON_CONSDEV | CON_ENABLED,
.index = -1,
};
#define PROM_TRUE -1
#define PROM_FALSE 0
/* Pretty sick eh? */
int prom_callback(long *args)
{
struct console *cons, *saved_console = NULL;
unsigned long flags;
char *cmd;
extern spinlock_t prom_entry_lock;
if (!args)
return -1;
if (!(cmd = (char *)args[0]))
return -1;
/*
* The callback can be invoked on the cpu that first dropped
* into prom_cmdline after taking the serial interrupt, or on
* a slave processor that was smp_captured() if the
* administrator has done a switch-cpu inside obp. In either
* case, the cpu is marked as in-interrupt. Drop IRQ locks.
*/
irq_exit();
/* XXX Revisit the locking here someday. This is a debugging
* XXX feature so it isnt all that critical. -DaveM
*/
local_irq_save(flags);
spin_unlock(&prom_entry_lock);
cons = console_drivers;
while (cons) {
unregister_console(cons);
cons->flags &= ~(CON_PRINTBUFFER);
cons->next = saved_console;
saved_console = cons;
cons = console_drivers;
}
register_console(&prom_console);
if (!strcmp(cmd, "sync")) {
prom_printf("PROM `%s' command...\n", cmd);
show_free_areas();
if (current->pid != 0) {
local_irq_enable();
sys_sync();
local_irq_disable();
}
args[2] = 0;
args[args[1] + 3] = -1;
prom_printf("Returning to PROM\n");
} else if (!strcmp(cmd, "va>tte-data")) {
unsigned long ctx, va;
unsigned long tte = 0;
long res = PROM_FALSE;
ctx = args[3];
va = args[4];
if (ctx) {
/*
* Find process owning ctx, lookup mapping.
*/
struct task_struct *p;
struct mm_struct *mm = NULL;
pgd_t *pgdp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
pte_t pte;
for_each_process(p) {
mm = p->mm;
if (CTX_NRBITS(mm->context) == ctx)
break;
}
if (!mm ||
CTX_NRBITS(mm->context) != ctx)
goto done;
pgdp = pgd_offset(mm, va);
if (pgd_none(*pgdp))
goto done;
pudp = pud_offset(pgdp, va);
if (pud_none(*pudp))
goto done;
pmdp = pmd_offset(pudp, va);
if (pmd_none(*pmdp))
goto done;
/* Preemption implicitly disabled by virtue of
* being called from inside OBP.
*/
ptep = pte_offset_map(pmdp, va);
pte = *ptep;
if (pte_present(pte)) {
tte = pte_val(pte);
res = PROM_TRUE;
}
pte_unmap(ptep);
goto done;
}
if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
if (tlb_type == spitfire) {
extern unsigned long sparc64_kern_pri_context;
/* Spitfire Errata #32 workaround */
__asm__ __volatile__(
"stxa %0, [%1] %2\n\t"
"flush %%g6"
: /* No outputs */
: "r" (sparc64_kern_pri_context),
"r" (PRIMARY_CONTEXT),
"i" (ASI_DMMU));
}
/*
* Locked down tlb entry.
*/
if (tlb_type == spitfire) {
tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT);
res = PROM_TRUE;
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT);
res = PROM_TRUE;
}
goto done;
}
if (va < PGDIR_SIZE) {
/*
* vmalloc or prom_inherited mapping.
*/
pgd_t *pgdp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
pte_t pte;
int error;
if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) {
tte = prom_virt_to_phys(va, &error);
if (!error)
res = PROM_TRUE;
goto done;
}
pgdp = pgd_offset_k(va);
if (pgd_none(*pgdp))
goto done;
pudp = pud_offset(pgdp, va);
if (pud_none(*pudp))
goto done;
pmdp = pmd_offset(pudp, va);
if (pmd_none(*pmdp))
goto done;
/* Preemption implicitly disabled by virtue of
* being called from inside OBP.
*/
ptep = pte_offset_kernel(pmdp, va);
pte = *ptep;
if (pte_present(pte)) {
tte = pte_val(pte);
res = PROM_TRUE;
}
goto done;
}
if (va < PAGE_OFFSET) {
/*
* No mappings here.
*/
goto done;
}
if (va & (1UL << 40)) {
/*
* I/O page.
*/
tte = (__pa(va) & _PAGE_PADDR) |
_PAGE_VALID | _PAGE_SZ4MB |
_PAGE_E | _PAGE_P | _PAGE_W;
res = PROM_TRUE;
goto done;
}
/*
* Normal page.
*/
tte = (__pa(va) & _PAGE_PADDR) |
_PAGE_VALID | _PAGE_SZ4MB |
_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W;
res = PROM_TRUE;
done:
if (res == PROM_TRUE) {
args[2] = 3;
args[args[1] + 3] = 0;
args[args[1] + 4] = res;
args[args[1] + 5] = tte;
} else {
args[2] = 2;
args[args[1] + 3] = 0;
args[args[1] + 4] = res;
}
} else if (!strcmp(cmd, ".soft1")) {
unsigned long tte;
tte = args[3];
prom_printf("%lx:\"%s%s%s%s%s\" ",
(tte & _PAGE_SOFT) >> 7,
tte & _PAGE_MODIFIED ? "M" : "-",
tte & _PAGE_ACCESSED ? "A" : "-",
tte & _PAGE_READ ? "W" : "-",
tte & _PAGE_WRITE ? "R" : "-",
tte & _PAGE_PRESENT ? "P" : "-");
args[2] = 2;
args[args[1] + 3] = 0;
args[args[1] + 4] = PROM_TRUE;
} else if (!strcmp(cmd, ".soft2")) {
unsigned long tte;
tte = args[3];
prom_printf("%lx ", (tte & 0x07FC000000000000UL) >> 50);
args[2] = 2;
args[args[1] + 3] = 0;
args[args[1] + 4] = PROM_TRUE;
} else {
prom_printf("unknown PROM `%s' command...\n", cmd);
}
unregister_console(&prom_console);
while (saved_console) {
cons = saved_console;
saved_console = cons->next;
register_console(cons);
}
spin_lock(&prom_entry_lock);
local_irq_restore(flags);
/*
* Restore in-interrupt status for a resume from obp.
*/
irq_enter();
return 0;
}
unsigned int boot_flags = 0;
#define BOOTME_DEBUG  0x1
#define BOOTME_SINGLE 0x2
@@ -483,17 +220,6 @@ char reboot_command[COMMAND_LINE_SIZE];

static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };

-void register_prom_callbacks(void)
-{
-	prom_setcallback(prom_callback);
-	prom_feval(": linux-va>tte-data 2 \" va>tte-data\" $callback drop ; "
-		   "' linux-va>tte-data to va>tte-data");
-	prom_feval(": linux-.soft1 1 \" .soft1\" $callback 2drop ; "
-		   "' linux-.soft1 to .soft1");
-	prom_feval(": linux-.soft2 1 \" .soft2\" $callback 2drop ; "
-		   "' linux-.soft2 to .soft2");
-}
-
static void __init per_cpu_patch(void)
{
#ifdef CONFIG_SMP
......
arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -59,7 +59,8 @@ sun4v_itlb_miss:
	/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
	ldda	[%g1] ASI_QUAD_LDD_PHYS, %g2
	cmp	%g2, %g6
-	sethi	%hi(_PAGE_EXEC), %g7
+	sethi	%hi(PAGE_EXEC), %g7
+	ldx	[%g7 + %lo(PAGE_EXEC)], %g7
	bne,a,pn %xcc, tsb_miss_page_table_walk
	 mov	FAULT_CODE_ITLB, %g3
	andcc	%g3, %g7, %g0
......
arch/sparc64/kernel/tsb.S
@@ -56,10 +56,11 @@ tsb_reload:
	/* If it is larger than the base page size, don't
	 * bother putting it into the TSB.
	 */
-	srlx		%g5, 32, %g2
-	sethi		%hi(_PAGE_ALL_SZ_BITS >> 32), %g7
-	and		%g2, %g7, %g2
-	sethi		%hi(_PAGE_SZBITS >> 32), %g7
+	sethi		%hi(_PAGE_ALL_SZ_BITS), %g7
+	ldx		[%g7 + %lo(_PAGE_ALL_SZ_BITS)], %g7
+	and		%g5, %g7, %g2
+	sethi		%hi(_PAGE_SZBITS), %g7
+	ldx		[%g7 + %lo(_PAGE_SZBITS)], %g7
	cmp		%g2, %g7
	bne,a,pn	%xcc, tsb_tlb_reload
	 TSB_STORE(%g1, %g0)
......
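The TTE size field lives in the topmost bits on sun4u but in bits 2:0 on sun4v, so tsb_reload can no longer test a fixed 32-bit half of the PTE; it loads both boot-initialized masks and compares the full 64-bit values. In C terms:

	/* Rough C equivalent of the patched test: only base-size pages
	 * go into the TSB.  _PAGE_ALL_SZ_BITS and _PAGE_SZBITS are the
	 * boot-time variables this patch adds to init.c.
	 */
	int is_base_page_size(unsigned long tte)
	{
		return (tte & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS;
	}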
arch/sparc64/lib/clear_page.S
@@ -23,9 +23,6 @@
 * disable preemption during the clear.
 */

-#define	TTE_BITS_TOP	(_PAGE_VALID | _PAGE_SZBITS)
-#define	TTE_BITS_BOTTOM	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
-
	.text

	.globl		_clear_page
@@ -44,12 +41,11 @@ clear_user_page:	/* %o0=dest, %o1=vaddr */
	sethi		%hi(PAGE_SIZE), %o4
	sllx		%g2, 32, %g2
-	sethi		%uhi(TTE_BITS_TOP), %g3
-	sllx		%g3, 32, %g3
+	sethi		%hi(PAGE_KERNEL_LOCKED), %g3
+	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
	sub		%o0, %g2, %g1		! paddr
-	or		%g3, TTE_BITS_BOTTOM, %g3
	and		%o1, %o4, %o0		! vaddr D-cache alias bit
	or		%g1, %g3, %g1		! TTE data
......
arch/sparc64/lib/copy_page.S
@@ -23,8 +23,6 @@
 * disable preemption during the clear.
 */

-#define	TTE_BITS_TOP	(_PAGE_VALID | _PAGE_SZBITS)
-#define	TTE_BITS_BOTTOM	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
-
#define	DCACHE_SIZE	(PAGE_SIZE * 2)

#if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19)
@@ -52,13 +50,12 @@ copy_user_page:	/* %o0=dest, %o1=src, %o2=vaddr */
	sethi		%hi(PAGE_SIZE), %o3
	sllx		%g2, 32, %g2
-	sethi		%uhi(TTE_BITS_TOP), %g3
-	sllx		%g3, 32, %g3
+	sethi		%hi(PAGE_KERNEL_LOCKED), %g3
+	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
	sub		%o0, %g2, %g1		! dest paddr
	sub		%o1, %g2, %g2		! src paddr
-	or		%g3, TTE_BITS_BOTTOM, %g3
	and		%o2, %o3, %o0		! vaddr D-cache alias bit
	or		%g1, %g3, %g1		! dest TTE data
......
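Both clear_user_page and copy_user_page previously assembled their locked TTE from the TTE_BITS_TOP/TTE_BITS_BOTTOM immediates; now they just load pgprot_val(PAGE_KERNEL_LOCKED), which the boot code fills with the right format (on sun4v it is simply PAGE_KERNEL, since the sun4v format has no _PAGE_L equivalent). The TTE construction reduces to:

	/* Sketch of the TTE both routines now build: the physical
	 * address OR'd with the boot-selected kernel protection bits.
	 */
	unsigned long user_page_tte(unsigned long paddr)
	{
		return paddr | pgprot_val(PAGE_KERNEL_LOCKED);
	}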
arch/sparc64/mm/fault.c
@@ -137,7 +137,7 @@ static unsigned int get_user_insn(unsigned long tpc)
	if (!pte_present(pte))
		goto out;

-	pa  = (pte_val(pte) & _PAGE_PADDR);
+	pa  = (pte_pfn(pte) << PAGE_SHIFT);
	pa += (tpc & ~PAGE_MASK);

	/* Use phys bypass so we don't pollute dtlb/dcache. */
......
arch/sparc64/mm/generic.c
@@ -15,15 +15,6 @@
#include <asm/page.h>
#include <asm/tlbflush.h>

-static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
-{
-	pte_t pte;
-	pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) &
-			~(unsigned long)_PAGE_CACHE);
-	pte_val(pte) |= (((unsigned long)space) << 32);
-	return pte;
-}
-
/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
@@ -48,24 +39,29 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
		pte_t entry;
		unsigned long curend = address + PAGE_SIZE;

-		entry = mk_pte_io(offset, prot, space);
+		entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
		if (!(address & 0xffff)) {
-			if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
-				entry = mk_pte_io(offset,
-						  __pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
-						  space);
+			if (PAGE_SIZE < (4 * 1024 * 1024) &&
+			    !(address & 0x3fffff) &&
+			    !(offset & 0x3ffffe) &&
+			    end >= address + 0x400000) {
+				entry = mk_pte_io(offset, prot, space,
+						  4 * 1024 * 1024);
				curend = address + 0x400000;
				offset += 0x400000;
-			} else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
-				entry = mk_pte_io(offset,
-						  __pgprot(pgprot_val (prot) | _PAGE_SZ512K),
-						  space);
+			} else if (PAGE_SIZE < (512 * 1024) &&
+				   !(address & 0x7ffff) &&
+				   !(offset & 0x7fffe) &&
+				   end >= address + 0x80000) {
+				entry = mk_pte_io(offset, prot, space,
+						  512 * 1024 * 1024);
				curend = address + 0x80000;
				offset += 0x80000;
-			} else if (!(offset & 0xfffe) && end >= address + 0x10000) {
-				entry = mk_pte_io(offset,
-						  __pgprot(pgprot_val (prot) | _PAGE_SZ64K),
-						  space);
+			} else if (PAGE_SIZE < (64 * 1024) &&
+				   !(offset & 0xfffe) &&
+				   end >= address + 0x10000) {
+				entry = mk_pte_io(offset, prot, space,
+						  64 * 1024);
				curend = address + 0x10000;
				offset += 0x10000;
			} else
......
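mk_pte_io() grows a fourth argument, the mapping size, which the new out-of-line version in init.c translates into format-specific size bits via pte_sz_bits(). A sketch of a call site, mirroring the 4MB branch above:

	/* Hypothetical helper showing the new call shape; mk_pte_io()
	 * and pte_sz_bits() are the functions added to init.c below.
	 */
	pte_t io_pte_4mb(unsigned long offset, pgprot_t prot, int space)
	{
		return mk_pte_io(offset, prot, space, 4 * 1024 * 1024);
	}

One oddity: the 512K branch passes 512 * 1024 * 1024 (512MB). Since pte_sz_bits() only recognizes the 8K/64K/512K/4MB sizes and falls back to the 8K encoding for anything else, that constant looks like a typo for 512 * 1024.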
arch/sparc64/mm/init.c
@@ -6,6 +6,7 @@
 */

#include <linux/config.h>
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
@@ -118,6 +119,7 @@ unsigned long phys_base __read_mostly;
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;
unsigned long pfn_base __read_mostly;
+unsigned long kern_linear_pte_xor __read_mostly;

/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
@@ -256,6 +258,9 @@ static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long
	__tsb_insert(tsb_addr, tag, pte);
}

+unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
+unsigned long _PAGE_SZBITS __read_mostly;
+
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
@@ -398,39 +403,9 @@ struct linux_prom_translation {
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

-extern unsigned long prom_boot_page;
-extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
-extern int prom_get_mmu_ihandle(void);
-extern void register_prom_callbacks(void);
-
/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

-/*
- * Translate PROM's mapping we capture at boot time into physical address.
- * The second parameter is only set from prom_callback() invocations.
- */
-unsigned long prom_virt_to_phys(unsigned long promva, int *error)
-{
-	int i;
-
-	for (i = 0; i < prom_trans_ents; i++) {
-		struct linux_prom_translation *p = &prom_trans[i];
-
-		if (promva >= p->virt &&
-		    promva < (p->virt + p->size)) {
-			unsigned long base = p->data & _PAGE_PADDR;
-
-			if (error)
-				*error = 0;
-			return base + (promva & (8192 - 1));
-		}
-	}
-	if (error)
-		*error = 1;
-	return 0UL;
-}
-
/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
@@ -537,6 +512,8 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr,
			     "3" (arg2), "4" (arg3));
}

+static unsigned long kern_large_tte(unsigned long paddr);
+
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
@@ -544,9 +521,7 @@ static void __init remap_kernel(void)
	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
-	tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
-				 _PAGE_CP | _PAGE_CV | _PAGE_P |
-				 _PAGE_L | _PAGE_W));
+	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;
@@ -591,10 +566,6 @@ static void __init inherit_prom_mappings(void)
	prom_printf("Remapping the kernel... ");
	remap_kernel();
	prom_printf("done.\n");
-
-	prom_printf("Registering callbacks... ");
-	register_prom_callbacks();
-	prom_printf("done.\n");
}

void prom_world(int enter)
@@ -631,63 +602,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
}
#endif /* DCACHE_ALIASING_POSSIBLE */

-/* If not locked, zap it. */
-void __flush_tlb_all(void)
-{
-	unsigned long pstate;
-	int i;
-
-	__asm__ __volatile__("flushw\n\t"
-			     "rdpr	%%pstate, %0\n\t"
-			     "wrpr	%0, %1, %%pstate"
-			     : "=r" (pstate)
-			     : "i" (PSTATE_IE));
-	if (tlb_type == spitfire) {
-		for (i = 0; i < 64; i++) {
-			/* Spitfire Errata #32 workaround */
-			/* NOTE: Always runs on spitfire, so no
-			 *       cheetah+ page size encodings.
-			 */
-			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
-					     "flush	%%g6"
-					     : /* No outputs */
-					     : "r" (0),
-					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-			if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
-				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
-						     "membar #Sync"
-						     : /* no outputs */
-						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
-				spitfire_put_dtlb_data(i, 0x0UL);
-			}
-
-			/* Spitfire Errata #32 workaround */
-			/* NOTE: Always runs on spitfire, so no
-			 *       cheetah+ page size encodings.
-			 */
-			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
-					     "flush	%%g6"
-					     : /* No outputs */
-					     : "r" (0),
-					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-			if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
-				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
-						     "membar #Sync"
-						     : /* no outputs */
-						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
-				spitfire_put_itlb_data(i, 0x0UL);
-			}
-		}
-	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		cheetah_flush_dtlb_all();
-		cheetah_flush_itlb_all();
-	}
-
-	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
-			     : : "r" (pstate));
-}
-
/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
@@ -1180,6 +1094,9 @@ extern void sun4v_patch_tlb_handlers(void);
static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

+static void sun4u_pgprot_init(void);
+static void sun4v_pgprot_init(void);
+
void __init paging_init(void)
{
	unsigned long end_pfn, pages_avail, shift;
@@ -1188,6 +1105,11 @@ void __init paging_init(void)
	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

+	if (tlb_type == hypervisor)
+		sun4v_pgprot_init();
+	else
+		sun4u_pgprot_init();
+
	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor)
		tsb_phys_patch();
@@ -1411,3 +1333,596 @@ void free_initrd_mem(unsigned long start, unsigned long end)
	}
}
#endif
/* SUN4U pte bits... */
#define _PAGE_SZ4MB_4U 0x6000000000000000 /* 4MB Page */
#define _PAGE_SZ512K_4U 0x4000000000000000 /* 512K Page */
#define _PAGE_SZ64K_4U 0x2000000000000000 /* 64K Page */
#define _PAGE_SZ8K_4U 0x0000000000000000 /* 8K Page */
#define _PAGE_NFO_4U 0x1000000000000000 /* No Fault Only */
#define _PAGE_IE_4U 0x0800000000000000 /* Invert Endianness */
#define _PAGE_SOFT2_4U 0x07FC000000000000 /* Software bits, set 2 */
#define _PAGE_RES1_4U 0x0002000000000000 /* Reserved */
#define _PAGE_SZ32MB_4U 0x0001000000000000 /* (Panther) 32MB page */
#define _PAGE_SZ256MB_4U 0x2001000000000000 /* (Panther) 256MB page */
#define _PAGE_SN_4U 0x0000800000000000 /* (Cheetah) Snoop */
#define _PAGE_RES2_4U 0x0000780000000000 /* Reserved */
#define _PAGE_PADDR_4U 0x000007FFFFFFE000 /* (Cheetah) paddr[42:13] */
#define _PAGE_SOFT_4U 0x0000000000001F80 /* Software bits: */
#define _PAGE_EXEC_4U 0x0000000000001000 /* Executable SW bit */
#define _PAGE_MODIFIED_4U 0x0000000000000800 /* Modified (dirty) */
#define _PAGE_FILE_4U 0x0000000000000800 /* Pagecache page */
#define _PAGE_ACCESSED_4U 0x0000000000000400 /* Accessed (ref'd) */
#define _PAGE_READ_4U 0x0000000000000200 /* Readable SW Bit */
#define _PAGE_WRITE_4U 0x0000000000000100 /* Writable SW Bit */
#define _PAGE_PRESENT_4U 0x0000000000000080 /* Present */
#define _PAGE_L_4U 0x0000000000000040 /* Locked TTE */
#define _PAGE_CP_4U 0x0000000000000020 /* Cacheable in P-Cache */
#define _PAGE_CV_4U 0x0000000000000010 /* Cacheable in V-Cache */
#define _PAGE_E_4U 0x0000000000000008 /* side-Effect */
#define _PAGE_P_4U 0x0000000000000004 /* Privileged Page */
#define _PAGE_W_4U 0x0000000000000002 /* Writable */
/* SUN4V pte bits... */
#define _PAGE_NFO_4V 0x4000000000000000 /* No Fault Only */
#define _PAGE_SOFT2_4V 0x3F00000000000000 /* Software bits, set 2 */
#define _PAGE_MODIFIED_4V 0x2000000000000000 /* Modified (dirty) */
#define _PAGE_ACCESSED_4V 0x1000000000000000 /* Accessed (ref'd) */
#define _PAGE_READ_4V 0x0800000000000000 /* Readable SW Bit */
#define _PAGE_WRITE_4V 0x0400000000000000 /* Writable SW Bit */
#define _PAGE_PADDR_4V 0x00FFFFFFFFFFE000 /* paddr[55:13] */
#define _PAGE_IE_4V 0x0000000000001000 /* Invert Endianness */
#define _PAGE_E_4V 0x0000000000000800 /* side-Effect */
#define _PAGE_CP_4V 0x0000000000000400 /* Cacheable in P-Cache */
#define _PAGE_CV_4V 0x0000000000000200 /* Cacheable in V-Cache */
#define _PAGE_P_4V 0x0000000000000100 /* Privileged Page */
#define _PAGE_EXEC_4V 0x0000000000000080 /* Executable Page */
#define _PAGE_W_4V 0x0000000000000040 /* Writable */
#define _PAGE_SOFT_4V 0x0000000000000030 /* Software bits */
#define _PAGE_FILE_4V 0x0000000000000020 /* Pagecache page */
#define _PAGE_PRESENT_4V 0x0000000000000010 /* Present */
#define _PAGE_RESV_4V 0x0000000000000008 /* Reserved */
#define _PAGE_SZ16GB_4V 0x0000000000000007 /* 16GB Page */
#define _PAGE_SZ2GB_4V 0x0000000000000006 /* 2GB Page */
#define _PAGE_SZ256MB_4V 0x0000000000000005 /* 256MB Page */
#define _PAGE_SZ32MB_4V 0x0000000000000004 /* 32MB Page */
#define _PAGE_SZ4MB_4V 0x0000000000000003 /* 4MB Page */
#define _PAGE_SZ512K_4V 0x0000000000000002 /* 512K Page */
#define _PAGE_SZ64K_4V 0x0000000000000001 /* 64K Page */
#define _PAGE_SZ8K_4V 0x0000000000000000 /* 8K Page */
#if PAGE_SHIFT == 13
#define _PAGE_SZBITS_4U _PAGE_SZ8K_4U
#define _PAGE_SZBITS_4V _PAGE_SZ8K_4V
#elif PAGE_SHIFT == 16
#define _PAGE_SZBITS_4U _PAGE_SZ64K_4U
#define _PAGE_SZBITS_4V _PAGE_SZ64K_4V
#elif PAGE_SHIFT == 19
#define _PAGE_SZBITS_4U _PAGE_SZ512K_4U
#define _PAGE_SZBITS_4V _PAGE_SZ512K_4V
#elif PAGE_SHIFT == 22
#define _PAGE_SZBITS_4U _PAGE_SZ4MB_4U
#define _PAGE_SZBITS_4V _PAGE_SZ4MB_4V
#else
#error Wrong PAGE_SHIFT specified
#endif
#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U
#define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U
#define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V
#endif
#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);
pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;
pgprot_t PAGE_EXEC __read_mostly;
unsigned long pg_iobits __read_mostly;
unsigned long _PAGE_IE __read_mostly;
unsigned long _PAGE_E __read_mostly;
unsigned long _PAGE_CACHE __read_mostly;
static void prot_init_common(unsigned long page_none,
unsigned long page_shared,
unsigned long page_copy,
unsigned long page_readonly,
unsigned long page_exec_bit)
{
PAGE_COPY = __pgprot(page_copy);
protection_map[0x0] = __pgprot(page_none);
protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
protection_map[0x4] = __pgprot(page_readonly);
protection_map[0x5] = __pgprot(page_readonly);
protection_map[0x6] = __pgprot(page_copy);
protection_map[0x7] = __pgprot(page_copy);
protection_map[0x8] = __pgprot(page_none);
protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
protection_map[0xc] = __pgprot(page_readonly);
protection_map[0xd] = __pgprot(page_readonly);
protection_map[0xe] = __pgprot(page_shared);
protection_map[0xf] = __pgprot(page_shared);
}
static void __init sun4u_pgprot_init(void)
{
unsigned long page_none, page_shared, page_copy, page_readonly;
unsigned long page_exec_bit;
PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
_PAGE_CACHE_4U | _PAGE_P_4U |
__ACCESS_BITS_4U | __DIRTY_BITS_4U |
_PAGE_EXEC_4U);
PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
_PAGE_CACHE_4U | _PAGE_P_4U |
__ACCESS_BITS_4U | __DIRTY_BITS_4U |
_PAGE_EXEC_4U | _PAGE_L_4U);
PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);
_PAGE_IE = _PAGE_IE_4U;
_PAGE_E = _PAGE_E_4U;
_PAGE_CACHE = _PAGE_CACHE_4U;
pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
__ACCESS_BITS_4U | _PAGE_E_4U);
kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
0xfffff80000000000;
kern_linear_pte_xor |= (_PAGE_CP_4U | _PAGE_CV_4U |
_PAGE_P_4U | _PAGE_W_4U);
_PAGE_SZBITS = _PAGE_SZBITS_4U;
_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
_PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
_PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
__ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
__ACCESS_BITS_4U | _PAGE_EXEC_4U);
page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
__ACCESS_BITS_4U | _PAGE_EXEC_4U);
page_exec_bit = _PAGE_EXEC_4U;
prot_init_common(page_none, page_shared, page_copy, page_readonly,
page_exec_bit);
}
static void __init sun4v_pgprot_init(void)
{
unsigned long page_none, page_shared, page_copy, page_readonly;
unsigned long page_exec_bit;
PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
_PAGE_CACHE_4V | _PAGE_P_4V |
__ACCESS_BITS_4V | __DIRTY_BITS_4V |
_PAGE_EXEC_4V);
PAGE_KERNEL_LOCKED = PAGE_KERNEL;
PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);
_PAGE_IE = _PAGE_IE_4V;
_PAGE_E = _PAGE_E_4V;
_PAGE_CACHE = _PAGE_CACHE_4V;
kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
0xfffff80000000000;
kern_linear_pte_xor |= (_PAGE_CP_4V | _PAGE_CV_4V |
_PAGE_P_4V | _PAGE_W_4V);
pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
__ACCESS_BITS_4V | _PAGE_E_4V);
_PAGE_SZBITS = _PAGE_SZBITS_4V;
_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
_PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
_PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
_PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
__ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
__ACCESS_BITS_4V | _PAGE_EXEC_4V);
page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
__ACCESS_BITS_4V | _PAGE_EXEC_4V);
page_exec_bit = _PAGE_EXEC_4V;
prot_init_common(page_none, page_shared, page_copy, page_readonly,
page_exec_bit);
}
unsigned long pte_sz_bits(unsigned long sz)
{
if (tlb_type == hypervisor) {
switch (sz) {
case 8 * 1024:
default:
return _PAGE_SZ8K_4V;
case 64 * 1024:
return _PAGE_SZ64K_4V;
case 512 * 1024:
return _PAGE_SZ512K_4V;
case 4 * 1024 * 1024:
return _PAGE_SZ4MB_4V;
};
} else {
switch (sz) {
case 8 * 1024:
default:
return _PAGE_SZ8K_4U;
case 64 * 1024:
return _PAGE_SZ64K_4U;
case 512 * 1024:
return _PAGE_SZ512K_4U;
case 4 * 1024 * 1024:
return _PAGE_SZ4MB_4U;
};
}
}
pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
pte_t pte;
if (tlb_type == hypervisor) {
pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4V) &
~(unsigned long)_PAGE_CACHE_4V);
} else {
pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4U) &
~(unsigned long)_PAGE_CACHE_4U);
}
pte_val(pte) |= (((unsigned long)space) << 32);
pte_val(pte) |= pte_sz_bits(page_size);
return pte;
}
unsigned long pte_present(pte_t pte)
{
return (pte_val(pte) &
((tlb_type == hypervisor) ?
_PAGE_PRESENT_4V : _PAGE_PRESENT_4U));
}
unsigned long pte_file(pte_t pte)
{
return (pte_val(pte) &
((tlb_type == hypervisor) ?
_PAGE_FILE_4V : _PAGE_FILE_4U));
}
unsigned long pte_read(pte_t pte)
{
return (pte_val(pte) &
((tlb_type == hypervisor) ?
_PAGE_READ_4V : _PAGE_READ_4U));
}
unsigned long pte_exec(pte_t pte)
{
return (pte_val(pte) &
((tlb_type == hypervisor) ?
_PAGE_EXEC_4V : _PAGE_EXEC_4U));
}
unsigned long pte_write(pte_t pte)
{
return (pte_val(pte) &
((tlb_type == hypervisor) ?
_PAGE_WRITE_4V : _PAGE_WRITE_4U));
}
unsigned long pte_dirty(pte_t pte)
{
return (pte_val(pte) &
((tlb_type == hypervisor) ?
_PAGE_MODIFIED_4V : _PAGE_MODIFIED_4U));
}
unsigned long pte_young(pte_t pte)
{
return (pte_val(pte) &
((tlb_type == hypervisor) ?
_PAGE_ACCESSED_4V : _PAGE_ACCESSED_4U));
}
pte_t pte_wrprotect(pte_t pte)
{
unsigned long mask = _PAGE_WRITE_4U | _PAGE_W_4U;
if (tlb_type == hypervisor)
mask = _PAGE_WRITE_4V | _PAGE_W_4V;
return __pte(pte_val(pte) & ~mask);
}
pte_t pte_rdprotect(pte_t pte)
{
unsigned long mask = _PAGE_R | _PAGE_READ_4U;
if (tlb_type == hypervisor)
mask = _PAGE_R | _PAGE_READ_4V;
return __pte(pte_val(pte) & ~mask);
}
pte_t pte_mkclean(pte_t pte)
{
unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U;
if (tlb_type == hypervisor)
mask = _PAGE_MODIFIED_4V | _PAGE_W_4V;
return __pte(pte_val(pte) & ~mask);
}
pte_t pte_mkold(pte_t pte)
{
unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U;
if (tlb_type == hypervisor)
mask = _PAGE_R | _PAGE_ACCESSED_4V;
return __pte(pte_val(pte) & ~mask);
}
pte_t pte_mkyoung(pte_t pte)
{
unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U;
if (tlb_type == hypervisor)
mask = _PAGE_R | _PAGE_ACCESSED_4V;
return __pte(pte_val(pte) | mask);
}
pte_t pte_mkwrite(pte_t pte)
{
unsigned long mask = _PAGE_WRITE_4U;
if (tlb_type == hypervisor)
mask = _PAGE_WRITE_4V;
return __pte(pte_val(pte) | mask);
}
pte_t pte_mkdirty(pte_t pte)
{
unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U;
if (tlb_type == hypervisor)
mask = _PAGE_MODIFIED_4V | _PAGE_W_4V;
return __pte(pte_val(pte) | mask);
}
pte_t pte_mkhuge(pte_t pte)
{
unsigned long mask = _PAGE_SZHUGE_4U;
if (tlb_type == hypervisor)
mask = _PAGE_SZHUGE_4V;
return __pte(pte_val(pte) | mask);
}
pte_t pgoff_to_pte(unsigned long off)
{
unsigned long bit = _PAGE_FILE_4U;
if (tlb_type == hypervisor)
bit = _PAGE_FILE_4V;
return __pte((off << PAGE_SHIFT) | bit);
}
pgprot_t pgprot_noncached(pgprot_t prot)
{
unsigned long val = pgprot_val(prot);
unsigned long off = _PAGE_CP_4U | _PAGE_CV_4U;
unsigned long on = _PAGE_E_4U;
if (tlb_type == hypervisor) {
off = _PAGE_CP_4V | _PAGE_CV_4V;
on = _PAGE_E_4V;
}
return __pgprot((val & ~off) | on);
}
pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
unsigned long sz_bits = _PAGE_SZBITS_4U;
if (tlb_type == hypervisor)
sz_bits = _PAGE_SZBITS_4V;
return __pte((pfn << PAGE_SHIFT) | pgprot_val(prot) | sz_bits);
}
unsigned long pte_pfn(pte_t pte)
{
unsigned long mask = _PAGE_PADDR_4U;
if (tlb_type == hypervisor)
mask = _PAGE_PADDR_4V;
return (pte_val(pte) & mask) >> PAGE_SHIFT;
}
pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
{
unsigned long preserve_mask;
unsigned long val;
preserve_mask = (_PAGE_PADDR_4U |
_PAGE_MODIFIED_4U |
_PAGE_ACCESSED_4U |
_PAGE_CP_4U |
_PAGE_CV_4U |
_PAGE_E_4U |
_PAGE_PRESENT_4U |
_PAGE_SZBITS_4U);
if (tlb_type == hypervisor)
preserve_mask = (_PAGE_PADDR_4V |
_PAGE_MODIFIED_4V |
_PAGE_ACCESSED_4V |
_PAGE_CP_4V |
_PAGE_CV_4V |
_PAGE_E_4V |
_PAGE_PRESENT_4V |
_PAGE_SZBITS_4V);
val = (pte_val(orig_pte) & preserve_mask);
return __pte(val | (pgprot_val(new_prot) & ~preserve_mask));
}
static unsigned long kern_large_tte(unsigned long paddr)
{
unsigned long val;
val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
_PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
_PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
if (tlb_type == hypervisor)
val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
_PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
_PAGE_EXEC_4V | _PAGE_W_4V);
return val | paddr;
}
/*
* Translate PROM's mapping we capture at boot time into physical address.
* The second parameter is only set from prom_callback() invocations.
*/
unsigned long prom_virt_to_phys(unsigned long promva, int *error)
{
unsigned long mask;
int i;
mask = _PAGE_PADDR_4U;
if (tlb_type == hypervisor)
mask = _PAGE_PADDR_4V;
for (i = 0; i < prom_trans_ents; i++) {
struct linux_prom_translation *p = &prom_trans[i];
if (promva >= p->virt &&
promva < (p->virt + p->size)) {
unsigned long base = p->data & mask;
if (error)
*error = 0;
return base + (promva & (8192 - 1));
}
}
if (error)
*error = 1;
return 0UL;
}
/* XXX We should kill off this ugly thing at some point. XXX */
unsigned long sun4u_get_pte(unsigned long addr)
{
pgd_t *pgdp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
unsigned long mask = _PAGE_PADDR_4U;
if (tlb_type == hypervisor)
mask = _PAGE_PADDR_4V;
if (addr >= PAGE_OFFSET)
return addr & mask;
if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
return prom_virt_to_phys(addr, NULL);
pgdp = pgd_offset_k(addr);
pudp = pud_offset(pgdp, addr);
pmdp = pmd_offset(pudp, addr);
ptep = pte_offset_kernel(pmdp, addr);
return pte_val(*ptep) & mask;
}
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
unsigned long pstate;
int i;
__asm__ __volatile__("flushw\n\t"
"rdpr %%pstate, %0\n\t"
"wrpr %0, %1, %%pstate"
: "=r" (pstate)
: "i" (PSTATE_IE));
if (tlb_type == spitfire) {
for (i = 0; i < 64; i++) {
/* Spitfire Errata #32 workaround */
/* NOTE: Always runs on spitfire, so no
* cheetah+ page size encodings.
*/
__asm__ __volatile__("stxa %0, [%1] %2\n\t"
"flush %%g6"
: /* No outputs */
: "r" (0),
"r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* no outputs */
: "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
spitfire_put_dtlb_data(i, 0x0UL);
}
/* Spitfire Errata #32 workaround */
/* NOTE: Always runs on spitfire, so no
* cheetah+ page size encodings.
*/
__asm__ __volatile__("stxa %0, [%1] %2\n\t"
"flush %%g6"
: /* No outputs */
: "r" (0),
"r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* no outputs */
: "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
spitfire_put_itlb_data(i, 0x0UL);
}
}
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
cheetah_flush_dtlb_all();
cheetah_flush_itlb_all();
}
__asm__ __volatile__("wrpr %0, 0, %%pstate"
: : "r" (pstate));
}
arch/sparc64/mm/tsb.c
@@ -85,8 +85,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
-	tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
-	       _PAGE_CV    | _PAGE_P | _PAGE_W);
+	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
@@ -99,55 +98,48 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
-		tte |= _PAGE_SZ8K;
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
-		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
-		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
-		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
-		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
-		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
-		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
-		tte |= _PAGE_SZ4MB;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		BUG();
	};
+	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB. */
......
include/asm-sparc64/pgtable.h
@@ -90,134 +90,48 @@
#endif /* !(__ASSEMBLY__) */
-/* Spitfire/Cheetah TTE bits. */
-#define _PAGE_VALID	_AC(0x8000000000000000,UL) /* Valid TTE            */
-#define _PAGE_R		_AC(0x8000000000000000,UL) /* Keep ref bit up to date*/
-#define _PAGE_SZ4MB	_AC(0x6000000000000000,UL) /* 4MB Page             */
-#define _PAGE_SZ512K	_AC(0x4000000000000000,UL) /* 512K Page            */
-#define _PAGE_SZ64K	_AC(0x2000000000000000,UL) /* 64K Page             */
-#define _PAGE_SZ8K	_AC(0x0000000000000000,UL) /* 8K Page              */
-#define _PAGE_NFO	_AC(0x1000000000000000,UL) /* No Fault Only        */
-#define _PAGE_IE	_AC(0x0800000000000000,UL) /* Invert Endianness    */
-#define _PAGE_SOFT2	_AC(0x07FC000000000000,UL) /* Software bits, set 2 */
-#define _PAGE_RES1	_AC(0x0002000000000000,UL) /* Reserved             */
-#define _PAGE_SZ32MB	_AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
-#define _PAGE_SZ256MB	_AC(0x2001000000000000,UL) /* (Panther) 256MB page */
-#define _PAGE_SN	_AC(0x0000800000000000,UL) /* (Cheetah) Snoop      */
-#define _PAGE_RES2	_AC(0x0000780000000000,UL) /* Reserved             */
-#define _PAGE_PADDR_SF	_AC(0x000001FFFFFFE000,UL) /* (Spitfire) paddr[40:13]*/
-#define _PAGE_PADDR	_AC(0x000007FFFFFFE000,UL) /* (Cheetah) paddr[42:13] */
-#define _PAGE_SOFT	_AC(0x0000000000001F80,UL) /* Software bits        */
-#define _PAGE_L		_AC(0x0000000000000040,UL) /* Locked TTE           */
-#define _PAGE_CP	_AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
-#define _PAGE_CV	_AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
-#define _PAGE_E		_AC(0x0000000000000008,UL) /* side-Effect          */
-#define _PAGE_P		_AC(0x0000000000000004,UL) /* Privileged Page      */
-#define _PAGE_W		_AC(0x0000000000000002,UL) /* Writable             */
-#define _PAGE_G		_AC(0x0000000000000001,UL) /* Global               */
-
-#define _PAGE_ALL_SZ_BITS	\
-	(_PAGE_SZ4MB | _PAGE_SZ512K | _PAGE_SZ64K | \
-	 _PAGE_SZ8K | _PAGE_SZ32MB | _PAGE_SZ256MB)
-
-/* Here are the SpitFire software bits we use in the TTE's.
- *
- * WARNING: If you are going to try and start using some
- *          of the soft2 bits, you will need to make
- *          modifications to the swap entry implementation.
- *          For example, one thing that could happen is that
- *          swp_entry_to_pte() would BUG_ON() if you tried
- *          to use one of the soft2 bits for _PAGE_FILE.
- *
- * Like other architectures, I have aliased _PAGE_FILE with
- * _PAGE_MODIFIED.  This works because _PAGE_FILE is never
- * interpreted that way unless _PAGE_PRESENT is clear.
- */
-#define _PAGE_EXEC	_AC(0x0000000000001000,UL)	/* Executable SW bit */
-#define _PAGE_MODIFIED	_AC(0x0000000000000800,UL)	/* Modified (dirty)  */
-#define _PAGE_FILE	_AC(0x0000000000000800,UL)	/* Pagecache page    */
-#define _PAGE_ACCESSED	_AC(0x0000000000000400,UL)	/* Accessed (ref'd)  */
-#define _PAGE_READ	_AC(0x0000000000000200,UL)	/* Readable SW Bit   */
-#define _PAGE_WRITE	_AC(0x0000000000000100,UL)	/* Writable SW Bit   */
-#define _PAGE_PRESENT	_AC(0x0000000000000080,UL)	/* Present           */
-
-#if PAGE_SHIFT == 13
-#define _PAGE_SZBITS	_PAGE_SZ8K
-#elif PAGE_SHIFT == 16
-#define _PAGE_SZBITS	_PAGE_SZ64K
-#elif PAGE_SHIFT == 19
-#define _PAGE_SZBITS	_PAGE_SZ512K
-#elif PAGE_SHIFT == 22
-#define _PAGE_SZBITS	_PAGE_SZ4MB
-#else
-#error Wrong PAGE_SHIFT specified
-#endif
-
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
-#define _PAGE_SZHUGE	_PAGE_SZ4MB
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-#define _PAGE_SZHUGE	_PAGE_SZ512K
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define _PAGE_SZHUGE	_PAGE_SZ64K
-#endif
-
-#define _PAGE_CACHE	(_PAGE_CP | _PAGE_CV)
-
-#define __DIRTY_BITS	(_PAGE_MODIFIED | _PAGE_WRITE | _PAGE_W)
-#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_READ | _PAGE_R)
-#define __PRIV_BITS	_PAGE_P
-
-#define PAGE_NONE	__pgprot (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_CACHE)
-
-/* Don't set the TTE _PAGE_W bit here, else the dirty bit never gets set. */
-#define PAGE_SHARED	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
-				  __ACCESS_BITS | _PAGE_WRITE | _PAGE_EXEC)
-
-#define PAGE_COPY	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
-				  __ACCESS_BITS | _PAGE_EXEC)
-
-#define PAGE_READONLY	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
-				  __ACCESS_BITS | _PAGE_EXEC)
-
-#define PAGE_KERNEL	__pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
-				  __PRIV_BITS | \
-				  __ACCESS_BITS | __DIRTY_BITS | _PAGE_EXEC)
-
-#define PAGE_SHARED_NOEXEC	__pgprot (_PAGE_PRESENT | _PAGE_VALID | \
-					  _PAGE_CACHE | \
-					  __ACCESS_BITS | _PAGE_WRITE)
-
-#define PAGE_COPY_NOEXEC	__pgprot (_PAGE_PRESENT | _PAGE_VALID | \
-					  _PAGE_CACHE | __ACCESS_BITS)
-
-#define PAGE_READONLY_NOEXEC	__pgprot (_PAGE_PRESENT | _PAGE_VALID | \
-					  _PAGE_CACHE | __ACCESS_BITS)
-
-#define _PFN_MASK	_PAGE_PADDR
-
-#define pg_iobits	(_PAGE_VALID | _PAGE_PRESENT | __DIRTY_BITS | \
-			 __ACCESS_BITS | _PAGE_E)
-
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY_NOEXEC
-#define __P010	PAGE_COPY_NOEXEC
-#define __P011	PAGE_COPY_NOEXEC
-#define __P100	PAGE_READONLY
-#define __P101	PAGE_READONLY
-#define __P110	PAGE_COPY
-#define __P111	PAGE_COPY
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY_NOEXEC
-#define __S010	PAGE_SHARED_NOEXEC
-#define __S011	PAGE_SHARED_NOEXEC
-#define __S100	PAGE_READONLY
-#define __S101	PAGE_READONLY
-#define __S110	PAGE_SHARED
-#define __S111	PAGE_SHARED
+/* PTE bits which are the same in SUN4U and SUN4V format.  */
+#define _PAGE_VALID	0x8000000000000000 /* Valid TTE              */
+#define _PAGE_R		0x8000000000000000 /* Keep ref bit up to date*/
+
+/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
+#define __P000	__pgprot(0)
+#define __P001	__pgprot(0)
+#define __P010	__pgprot(0)
+#define __P011	__pgprot(0)
+#define __P100	__pgprot(0)
+#define __P101	__pgprot(0)
+#define __P110	__pgprot(0)
+#define __P111	__pgprot(0)
+
+#define __S000	__pgprot(0)
+#define __S001	__pgprot(0)
+#define __S010	__pgprot(0)
+#define __S011	__pgprot(0)
+#define __S100	__pgprot(0)
+#define __S101	__pgprot(0)
+#define __S110	__pgprot(0)
+#define __S111	__pgprot(0)

#ifndef __ASSEMBLY__

+extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
+
+extern unsigned long pte_sz_bits(unsigned long size);
+
+extern pgprot_t PAGE_KERNEL;
+extern pgprot_t PAGE_KERNEL_LOCKED;
+extern pgprot_t PAGE_COPY;
+
+/* XXX This uglyness is for the atyfb driver's sparc mmap() support. XXX */
+extern unsigned long _PAGE_IE;
+extern unsigned long _PAGE_E;
+extern unsigned long _PAGE_CACHE;
+
+extern unsigned long pg_iobits;
+extern unsigned long _PAGE_ALL_SZ_BITS;
+extern unsigned long _PAGE_SZBITS;
+
extern unsigned long phys_base;
extern unsigned long pfn_base;
@@ -229,27 +143,12 @@ extern struct page *mem_map_zero;
 * the first physical page in the machine is at some huge physical address,
 * such as 4GB.   This is common on a partitioned E10000, for example.
 */
-#define pfn_pte(pfn, prot)	\
-	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot) | _PAGE_SZBITS)
+extern pte_t pfn_pte(unsigned long, pgprot_t);
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

-#define pte_pfn(x)		((pte_val(x) & _PAGE_PADDR)>>PAGE_SHIFT)
+extern unsigned long pte_pfn(pte_t);
#define pte_page(x)		pfn_to_page(pte_pfn(x))

-static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
-{
-	pte_t __pte;
-	const unsigned long preserve_mask = (_PFN_MASK |
-					     _PAGE_MODIFIED | _PAGE_ACCESSED |
-					     _PAGE_CACHE | _PAGE_E |
-					     _PAGE_PRESENT | _PAGE_SZBITS);
-	pte_val(__pte) = (pte_val(orig_pte) & preserve_mask) |
-		(pgprot_val(new_prot) & ~preserve_mask);
-
-	return __pte;
-}
+extern pte_t pte_modify(pte_t, pgprot_t);
#define pmd_set(pmdp, ptep)	\
	(pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
#define pud_set(pudp, pmdp)	\
@@ -259,8 +158,6 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
#define pmd_page(pmd)			virt_to_page((void *)__pmd_page(pmd))
#define pud_page(pud)		\
	((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL)))
-#define pte_none(pte)			(!pte_val(pte))
-#define pte_present(pte)		(pte_val(pte) & _PAGE_PRESENT)
#define pmd_none(pmd)			(!pmd_val(pmd))
#define pmd_bad(pmd)			(0)
#define pmd_present(pmd)		(pmd_val(pmd) != 0U)
@@ -270,30 +167,29 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
#define pud_present(pud)		(pud_val(pud) != 0U)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0U)
+/* Same in both SUN4V and SUN4U.  */
+#define pte_none(pte)			(!pte_val(pte))
+
+extern unsigned long pte_present(pte_t);

/* The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
-#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
-#define pte_exec(pte)		(pte_val(pte) & _PAGE_EXEC)
-#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
-#define pte_dirty(pte)		(pte_val(pte) & _PAGE_MODIFIED)
-#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
-#define pte_wrprotect(pte)	(__pte(pte_val(pte) & ~(_PAGE_WRITE|_PAGE_W)))
-#define pte_rdprotect(pte)	\
-	(__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_READ))
-#define pte_mkclean(pte)	\
-	(__pte(pte_val(pte) & ~(_PAGE_MODIFIED|_PAGE_W)))
-#define pte_mkold(pte)		\
-	(__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_ACCESSED))
-
-/* Permanent address of a page. */
-#define __page_address(page)	page_address(page)
+extern unsigned long pte_read(pte_t);
+extern unsigned long pte_exec(pte_t);
+extern unsigned long pte_write(pte_t);
+extern unsigned long pte_dirty(pte_t);
+extern unsigned long pte_young(pte_t);
+extern pte_t pte_wrprotect(pte_t);
+extern pte_t pte_rdprotect(pte_t);
+extern pte_t pte_mkclean(pte_t);
+extern pte_t pte_mkold(pte_t);

/* Be very careful when you change these three, they are delicate. */
-#define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R))
-#define pte_mkwrite(pte)	(__pte(pte_val(pte) | _PAGE_WRITE))
-#define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_MODIFIED | _PAGE_W))
-#define pte_mkhuge(pte)		(__pte(pte_val(pte) | _PAGE_SZHUGE))
+extern pte_t pte_mkyoung(pte_t);
+extern pte_t pte_mkwrite(pte_t);
+extern pte_t pte_mkdirty(pte_t);
+extern pte_t pte_mkhuge(pte_t);
/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
@@ -328,6 +224,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
	/* It is more efficient to let flush_tlb_kernel_range()
	 * handle init_mm tlb flushes.
+	 *
+	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
+	 *             and SUN4V pte layout, so this inline test is fine.
	 */
	if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
		tlb_batch_add(mm, addr, ptep, orig);
@@ -362,42 +261,23 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

/* File offset in PTE support. */
-#define pte_file(pte)		(pte_val(pte) & _PAGE_FILE)
+extern unsigned long pte_file(pte_t);
#define pte_to_pgoff(pte)	(pte_val(pte) >> PAGE_SHIFT)
-#define pgoff_to_pte(off)	(__pte(((off) << PAGE_SHIFT) | _PAGE_FILE))
+extern pte_t pgoff_to_pte(unsigned long);
#define PTE_FILE_MAX_BITS	(64UL - PAGE_SHIFT - 1UL)

extern unsigned long prom_virt_to_phys(unsigned long, int *);

-static __inline__ unsigned long
-sun4u_get_pte (unsigned long addr)
-{
-	pgd_t *pgdp;
-	pud_t *pudp;
-	pmd_t *pmdp;
-	pte_t *ptep;
-
-	if (addr >= PAGE_OFFSET)
-		return addr & _PAGE_PADDR;
-	if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
-		return prom_virt_to_phys(addr, NULL);
-	pgdp = pgd_offset_k(addr);
-	pudp = pud_offset(pgdp, addr);
-	pmdp = pmd_offset(pudp, addr);
-	ptep = pte_offset_kernel(pmdp, addr);
-
-	return pte_val(*ptep) & _PAGE_PADDR;
-}
+extern unsigned long sun4u_get_pte(unsigned long);

-static __inline__ unsigned long
-__get_phys (unsigned long addr)
+static inline unsigned long __get_phys(unsigned long addr)
{
-	return sun4u_get_pte (addr);
+	return sun4u_get_pte(addr);
}

-static __inline__ int
-__get_iospace (unsigned long addr)
+static inline int __get_iospace(unsigned long addr)
{
-	return ((sun4u_get_pte (addr) & 0xf0000000) >> 28);
+	return ((sun4u_get_pte(addr) & 0xf0000000) >> 28);
}

extern unsigned long *sparc64_valid_addr_bitmap;
@@ -411,9 +291,7 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
			      unsigned long size, pgprot_t prot);

/* Clear virtual and physical cachability, set side-effect bit. */
-#define pgprot_noncached(prot)	\
-	(__pgprot((pgprot_val(prot) & ~(_PAGE_CP | _PAGE_CV)) | \
-	 _PAGE_E))
+extern pgprot_t pgprot_noncached(pgprot_t);

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
......
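Nothing changes for pgtable.h users: each former macro is replaced by an extern function with the same name and argument shape, so existing call sites compile unmodified and simply pick up the runtime dispatch. For example (hypothetical call site, the usual driver mmap idiom):

	/* Hypothetical caller: pgprot_noncached() is now a function
	 * that clears _PAGE_CP_4{U,V}|_PAGE_CV_4{U,V} and sets
	 * _PAGE_E_4{U,V} according to tlb_type.
	 */
	static void example_mmap_prot(struct vm_area_struct *vma)
	{
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	}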