Commit b5666f70 authored by Michael Ellerman, committed by Paul Mackerras

[PATCH] powerpc: Separate usage of KERNELBASE and PAGE_OFFSET

This patch separates usage of KERNELBASE and PAGE_OFFSET. I haven't
looked at any of the PPC32 code; if we ever want to support Kdump on
PPC we'll have to do another audit, ditto for iSeries.

This patch makes PAGE_OFFSET the constant: it'll always be 0xC * 1
gazillion for 64-bit.

To get a physical address from a virtual one you subtract PAGE_OFFSET,
_not_ KERNELBASE.

KERNELBASE is the virtual address of the start of the kernel, it's
often the same as PAGE_OFFSET, but _might not be_.

If you want to know something's offset from the start of the kernel
you should subtract KERNELBASE.
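
To make that distinction concrete, here is a minimal, self-contained C sketch (not part of the patch; the PAGE_OFFSET and KERNELBASE values are illustrative stand-ins for the real CONFIG_KERNEL_START, and the address used in main() is hypothetical):

#include <stdio.h>

/* Illustrative constants only; the kernel derives these from its config,
 * and in the kdump case KERNELBASE would differ from PAGE_OFFSET. */
#define PAGE_OFFSET 0xc000000000000000UL /* start of the kernel linear mapping */
#define KERNELBASE  0xc000000000000000UL /* start of the kernel image */

/* virtual <-> physical conversions use PAGE_OFFSET, never KERNELBASE */
static unsigned long to_phys(unsigned long vaddr) { return vaddr - PAGE_OFFSET; }
static unsigned long to_virt(unsigned long paddr) { return paddr + PAGE_OFFSET; }

/* offset from the start of the kernel image uses KERNELBASE */
static unsigned long kernel_offset(unsigned long vaddr) { return vaddr - KERNELBASE; }

int main(void)
{
	unsigned long vaddr = KERNELBASE + 0x10000; /* hypothetical kernel address */

	printf("phys   = 0x%lx\n", to_phys(vaddr));
	printf("virt   = 0x%lx\n", to_virt(to_phys(vaddr)));
	printf("offset = 0x%lx\n", kernel_offset(vaddr));
	return 0;
}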
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Parent 51fae6de
@@ -60,7 +60,7 @@ int force_printk_to_btext = 0;
  *
  * The display is mapped to virtual address 0xD0000000, rather
  * than 1:1, because some some CHRP machines put the frame buffer
- * in the region starting at 0xC0000000 (KERNELBASE).
+ * in the region starting at 0xC0000000 (PAGE_OFFSET).
  * This mapping is temporary and will disappear as soon as the
  * setup done by MMU_Init() is applied.
  *
@@ -71,7 +71,7 @@ int force_printk_to_btext = 0;
  */
 void __init btext_prepare_BAT(void)
 {
-	unsigned long vaddr = KERNELBASE + 0x10000000;
+	unsigned long vaddr = PAGE_OFFSET + 0x10000000;
 	unsigned long addr;
 	unsigned long lowbits;
@@ -690,7 +690,7 @@ _GLOBAL(enter_rtas)
 	/* Setup our real return addr */
 	SET_REG_TO_LABEL(r4,.rtas_return_loc)
-	SET_REG_TO_CONST(r9,KERNELBASE)
+	SET_REG_TO_CONST(r9,PAGE_OFFSET)
 	sub	r4,r4,r9
 	mtlr	r4
@@ -718,7 +718,7 @@ _GLOBAL(enter_rtas)
 _STATIC(rtas_return_loc)
 	/* relocation is off at this point */
 	mfspr	r4,SPRN_SPRG3		/* Get PACA */
-	SET_REG_TO_CONST(r5, KERNELBASE)
+	SET_REG_TO_CONST(r5, PAGE_OFFSET)
 	sub	r4,r4,r5		/* RELOC the PACA base pointer */
 	mfmsr	r6
@@ -16,8 +16,8 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
 	.xSegmentTableOffs = STAB0_PAGE,
 	.xEsids = {
-		{ .xKernelEsid = GET_ESID(KERNELBASE),
-		  .xKernelVsid = KERNEL_VSID(KERNELBASE), },
+		{ .xKernelEsid = GET_ESID(PAGE_OFFSET),
+		  .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), },
 		{ .xKernelEsid = GET_ESID(VMALLOCBASE),
 		  .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
 	},
@@ -25,7 +25,7 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
 	.xRanges = {
 		{ .xPages = HvPagesToMap,
 		  .xOffset = 0,
-		  .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - HW_PAGE_SHIFT),
+		  .xVPN = KERNEL_VSID(PAGE_OFFSET) << (SID_SHIFT - HW_PAGE_SHIFT),
 		},
 	},
 };
@@ -153,9 +153,8 @@ void kexec_copy_flush(struct kimage *image)
 	 * including ones that were in place on the original copy
 	 */
 	for (i = 0; i < nr_segments; i++)
-		flush_icache_range(ranges[i].mem + KERNELBASE,
-			ranges[i].mem + KERNELBASE +
-			ranges[i].memsz);
+		flush_icache_range((unsigned long)__va(ranges[i].mem),
+			(unsigned long)__va(ranges[i].mem + ranges[i].memsz));
 }
 #ifdef CONFIG_SMP
@@ -456,7 +456,7 @@ void __init htab_initialize(void)
 	/* create bolted the linear mapping in the hash table */
 	for (i=0; i < lmb.memory.cnt; i++) {
-		base = lmb.memory.region[i].base + KERNELBASE;
+		base = (unsigned long)__va(lmb.memory.region[i].base);
 		size = lmb.memory.region[i].size;
 		DBG("creating mapping for region: %lx : %lx\n", base, size);
@@ -498,8 +498,8 @@ void __init htab_initialize(void)
 	 * for either 4K or 16MB pages.
 	 */
 	if (tce_alloc_start) {
-		tce_alloc_start += KERNELBASE;
-		tce_alloc_end += KERNELBASE;
+		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
+		tce_alloc_end = (unsigned long)__va(tce_alloc_end);
 		if (base + size >= tce_alloc_start)
 			tce_alloc_start = base + size + 1;
@@ -75,7 +75,7 @@ static void slb_flush_and_rebolt(void)
 	vflags = SLB_VSID_KERNEL | virtual_llp;
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
+	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
 		ksp_esid_data &= ~SLB_ESID_V;
 	/* We need to do this all in asm, so we're sure we don't touch
@@ -213,7 +213,7 @@ void slb_initialize(void)
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_slbe(KERNELBASE, lflags, 0);
+	create_slbe(PAGE_OFFSET, lflags, 0);
 	/* VMALLOC space has 4K pages always for now */
 	create_slbe(VMALLOCBASE, vflags, 1);
@@ -37,9 +37,9 @@ _GLOBAL(slb_allocate_realmode)
 	srdi	r9,r3,60		/* get region */
 	srdi	r10,r3,28		/* get esid */
-	cmpldi	cr7,r9,0xc		/* cmp KERNELBASE for later use */
+	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */
-	/* r3 = address, r10 = esid, cr7 = <>KERNELBASE */
+	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
 	blt	cr7,0f			/* user or kernel? */
 	/* kernel address: proto-VSID = ESID */
@@ -166,7 +166,7 @@ _GLOBAL(slb_allocate_user)
 /*
  * Finish loading of an SLB entry and return
  *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <>KERNELBASE
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
 slb_finish_load:
 	ASM_VSID_SCRAMBLE(r10,r9)
@@ -40,7 +40,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 	unsigned long entry, group, old_esid, castout_entry, i;
 	unsigned int global_entry;
 	struct stab_entry *ste, *castout_ste;
-	unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;
+	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;
 	vsid_data = vsid << STE_VSID_SHIFT;
 	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
@@ -83,7 +83,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 		}
 		/* Dont cast out the first kernel segment */
-		if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
+		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
 			break;
 		castout_entry = (castout_entry + 1) & 0xf;
@@ -251,7 +251,7 @@ void stabs_alloc(void)
 			panic("Unable to allocate segment table for CPU %d.\n",
 			      cpu);
-		newstab += KERNELBASE;
+		newstab = (unsigned long)__va(newstab);
 		memset((void *)newstab, 0, HW_PAGE_SIZE);
@@ -270,11 +270,11 @@ void stabs_alloc(void)
  */
 void stab_initialize(unsigned long stab)
 {
-	unsigned long vsid = get_kernel_vsid(KERNELBASE);
+	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET);
 	unsigned long stabreal;
 	asm volatile("isync; slbia; isync":::"memory");
-	make_ste(stab, GET_ESID(KERNELBASE), vsid);
+	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);
 	/* Order update */
 	asm volatile("sync":::"memory");
@@ -37,6 +37,20 @@
  */
 #define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))
+/*
+ * KERNELBASE is the virtual address of the start of the kernel, it's often
+ * the same as PAGE_OFFSET, but _might not be_.
+ *
+ * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
+ *
+ * To get a physical address from a virtual one you subtract PAGE_OFFSET,
+ * _not_ KERNELBASE.
+ *
+ * If you want to know something's offset from the start of the kernel you
+ * should subtract KERNELBASE.
+ *
+ * If you want to test if something's a kernel address, use is_kernel_addr().
+ */
 #define PAGE_OFFSET	ASM_CONST(CONFIG_KERNEL_START)
 #define KERNELBASE	PAGE_OFFSET
@@ -56,7 +70,7 @@
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
 #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
 /*
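
For reference, the net effect of the page.h hunk can be captured in a short standalone sketch. The PAGE_OFFSET value here is illustrative (the real definition is ASM_CONST(CONFIG_KERNEL_START)), and the is_kernel_addr() body is an assumption consistent with the comment added above; its actual definition is not part of this diff:

/* Illustrative value only; the kernel uses ASM_CONST(CONFIG_KERNEL_START). */
#define PAGE_OFFSET	0xc000000000000000UL

/* After this patch __va()/__pa() translate against PAGE_OFFSET rather than
 * KERNELBASE, so they stay correct even when the kernel image is linked at a
 * different KERNELBASE (the kdump case mentioned in the commit message). */
#define __va(x)	((void *)((unsigned long)(x) + PAGE_OFFSET))
#define __pa(x)	((unsigned long)(x) - PAGE_OFFSET)

/* Assumed definition, matching the new comment's advice; the real helper is
 * defined elsewhere in the tree and may differ. */
static inline int is_kernel_addr(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}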