Commit 16a15a30 authored by Stephen Rothwell, committed by Paul Mackerras

[POWERPC] iSeries: Clean up lparmap mess

We need to have xLparMap in head_64.S so that it is at a fixed address
(because the linker will not resolve (address & 0xffffffff) for us).
But the assembler miscalculates the KERNEL_VSID() expressions.  So put
the confusing expressions into asm-offsets.c.
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Parent 556ecf9b
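For context on why the move to asm-offsets.c helps, here is a minimal sketch of the DEFINE() mechanism used there (simplified; the exact macro and the Kbuild sed rule live in the kernel's build support and may differ in detail). The point is that gcc, not gas, folds the constant expression:

/* Minimal sketch of the asm-offsets DEFINE() trick, simplified from the
 * kernel's own helper.  gcc evaluates the constant expression and emits it
 * as an immediate operand in a marker line of the generated .s file; the
 * Kbuild sed rule then turns each "->" line into a "#define" in
 * asm-offsets.h, which assembly files such as head_64.S can include. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	/* A 64-bit constant expression folded by the C compiler; this one
	 * mirrors GET_ESID(PAGE_OFFSET) for the usual ppc64 PAGE_OFFSET. */
	DEFINE(EXAMPLE_ESID, (0xC000000000000000UL >> 28) & 0xfffffffffUL);
	return 0;
}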
@@ -80,13 +80,6 @@ ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
 obj-y += iomap.o
 endif
 
-ifeq ($(CONFIG_PPC_ISERIES),y)
-CFLAGS_lparmap.s += -g0
-extra-y += lparmap.s
-$(obj)/head_64.o: $(obj)/lparmap.s
-AFLAGS_head_64.o += -I$(obj)
-endif
-
 else
 # stuff used from here for ARCH=ppc
 smpobj-$(CONFIG_SMP) += smp.o
@@ -312,5 +312,13 @@ int main(void)
 #ifdef CONFIG_BUG
 	DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
 #endif
+#ifdef CONFIG_PPC_ISERIES
+	/* the assembler miscalculates the VSID values */
+	DEFINE(PAGE_OFFSET_ESID, GET_ESID(PAGE_OFFSET));
+	DEFINE(PAGE_OFFSET_VSID, KERNEL_VSID(PAGE_OFFSET));
+	DEFINE(VMALLOC_START_ESID, GET_ESID(VMALLOC_START));
+	DEFINE(VMALLOC_START_VSID, KERNEL_VSID(VMALLOC_START));
+#endif
+
 	return 0;
 }
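As a stand-alone illustration (not kernel code, and assuming the usual ppc64 PAGE_OFFSET of 0xC000000000000000), the ESID half of these constants is simple enough to recompute in user space; the VSID half is omitted because it depends on the VSID scramble constants:

#include <stdio.h>
#include <stdint.h>

/* User-space recomputation of GET_ESID(PAGE_OFFSET), just to show the kind
 * of value PAGE_OFFSET_ESID ends up carrying in asm-offsets.h. */
#define SID_SHIFT	28
#define SID_MASK	0xfffffffffUL
#define GET_ESID(x)	(((x) >> SID_SHIFT) & SID_MASK)

int main(void)
{
	uint64_t page_offset = 0xC000000000000000UL;

	/* prints PAGE_OFFSET_ESID = 0xc00000000 */
	printf("PAGE_OFFSET_ESID = 0x%llx\n",
	       (unsigned long long)GET_ESID(page_offset));
	return 0;
}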
@@ -34,6 +34,7 @@
 #include <asm/iseries/lpar_map.h>
 #include <asm/thread_info.h>
 #include <asm/firmware.h>
+#include <asm/page_64.h>
 
 #define DO_SOFT_DISABLE
@@ -1519,8 +1520,8 @@ _GLOBAL(do_stab_bolted)
  * Space for CPU0's segment table.
  *
  * On iSeries, the hypervisor must fill in at least one entry before
- * we get control (with relocate on). The address is give to the hv
- * as a page number (see xLparMap in lpardata.c), so this must be at a
+ * we get control (with relocate on). The address is given to the hv
+ * as a page number (see xLparMap below), so this must be at a
  * fixed address (the linker can't compute (u64)&initial_stab >>
  * PAGE_SHIFT).
  */
@@ -1542,12 +1543,22 @@ fwnmi_data_area:
  * both pSeries and iSeries */
 #ifdef CONFIG_PPC_ISERIES
 	. = LPARMAP_PHYS
-#include "lparmap.s"
-/*
- * This ".text" is here for old compilers that generate a trailing
- * .note section when compiling .c files to .s
- */
-.text
+	.globl xLparMap
+xLparMap:
+	.quad	HvEsidsToMap		/* xNumberEsids */
+	.quad	HvRangesToMap		/* xNumberRanges */
+	.quad	STAB0_PAGE		/* xSegmentTableOffs */
+	.zero	40			/* xRsvd */
+	/* xEsids (HvEsidsToMap entries of 2 quads) */
+	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
+	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
+	.quad	VMALLOC_START_ESID	/* xKernelEsid */
+	.quad	VMALLOC_START_VSID	/* xKernelVsid */
+	/* xRanges (HvRangesToMap entries of 3 quads) */
+	.quad	HvPagesToMap		/* xPages */
+	.quad	0			/* xOffset */
+	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */
+
 #endif /* CONFIG_PPC_ISERIES */
 
 . = 0x8000
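The xVPN line is the only arithmetic in the block; here is a short sketch of what it computes, assuming the usual 4k hardware page size (HW_PAGE_SHIFT of 12):

#include <stdint.h>

/* Illustrative only: mirrors the xVPN expression above.  With SID_SHIFT = 28
 * and HW_PAGE_SHIFT assumed to be 12, the shift is 16: a 256MB segment holds
 * 2^16 4k hardware pages, so shifting the VSID left by 16 yields the virtual
 * page number of the first page in that segment. */
#define SID_SHIFT	28
#define HW_PAGE_SHIFT	12

static inline uint64_t first_vpn_of_segment(uint64_t vsid)
{
	return vsid << (SID_SHIFT - HW_PAGE_SHIFT);	/* vsid << 16 */
}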
-/*
- * Copyright (C) 2005 Stephen Rothwell IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <asm/mmu.h>
-#include <asm/pgtable.h>
-#include <asm/iseries/lpar_map.h>
-
-/* The # is to stop gcc trying to make .text nonexecutable */
-const struct LparMap __attribute__((__section__(".text #"))) xLparMap = {
-	.xNumberEsids = HvEsidsToMap,
-	.xNumberRanges = HvRangesToMap,
-	.xSegmentTableOffs = STAB0_PAGE,
-	.xEsids = {
-		{ .xKernelEsid = GET_ESID(PAGE_OFFSET),
-		  .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), },
-		{ .xKernelEsid = GET_ESID(VMALLOC_START),
-		  .xKernelVsid = KERNEL_VSID(VMALLOC_START), },
-	},
-	.xRanges = {
-		{ .xPages = HvPagesToMap,
-		  .xOffset = 0,
-		  .xVPN = KERNEL_VSID(PAGE_OFFSET) << (SID_SHIFT - HW_PAGE_SHIFT),
-		},
-	},
-};
@@ -22,6 +22,8 @@
 
 #include <asm/types.h>
 
+#endif
+
 /*
  * The iSeries hypervisor will set up mapping for one or more
  * ESID/VSID pairs (in SLB/segment registers) and will set up
@@ -56,6 +58,7 @@
 /* Hypervisor initially maps 32MB of the load area */
 #define HvPagesToMap 8192
 
+#ifndef __ASSEMBLY__
 struct LparMap {
 	u64 xNumberEsids;	// Number of ESID/VSID pairs
 	u64 xNumberRanges;	// Number of VA ranges to map
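Taken together, the two hunks above move the #ifndef __ASSEMBLY__ guard so that it closes after the asm/types.h include and reopens before struct LparMap, leaving the HvEsidsToMap/HvRangesToMap/HvPagesToMap constants visible to head_64.S. A rough sketch of the resulting layout (surrounding lines abbreviated; HvPagesToMap comes from the hunk above, the other two values are from memory and simply match the two xEsids entries and single xRanges entry):

#ifndef __ASSEMBLY__
#include <asm/types.h>		/* C only: provides u64 */
#endif

#define HvEsidsToMap	2	/* now visible to head_64.S as well */
#define HvRangesToMap	1
#define HvPagesToMap	8192

#ifndef __ASSEMBLY__
struct LparMap {		/* C only */
	u64	xNumberEsids;
	/* ... */
};
#endif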
@@ -28,7 +28,7 @@
 
 /* Segment size */
 #define SID_SHIFT 28
-#define SID_MASK 0xfffffffffUL
+#define SID_MASK ASM_CONST(0xfffffffff)
 #define ESID_MASK 0xfffffffff0000000UL
 #define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
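The switch to ASM_CONST is needed because head_64.S now includes asm/page_64.h, so constants defined there have to be acceptable to gas as well, and gas rejects the UL suffix. ASM_CONST is defined elsewhere in the powerpc headers roughly as follows (reproduced from memory, so treat as a sketch):

/* ASM_CONST: append the UL suffix only when compiling C; leave the bare
 * constant for assembly, since gas does not accept the suffix. */
#ifdef __ASSEMBLY__
#define ASM_CONST(x)	x
#else
#define __ASM_CONST(x)	x##UL
#define ASM_CONST(x)	__ASM_CONST(x)
#endif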