openanolis / cloud-kernel

Commit 5a216a20
Authored Feb 09, 2008 by Martin Schwidefsky

[S390] Add four level page tables for CONFIG_64BIT=y.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Parent: 146e4b3c

Showing 8 changed files with 124 additions and 48 deletions (+124, -48):
arch/s390/mm/init.c              +2   -2
arch/s390/mm/vmem.c              +13  -1
include/asm-s390/elf.h           +1   -8
include/asm-s390/mmu_context.h   +1   -1
include/asm-s390/pgalloc.h       +24  -5
include/asm-s390/pgtable.h       +45  -12
include/asm-s390/processor.h     +14  -10
include/asm-s390/tlb.h           +24  -9
arch/s390/mm/init.c
@@ -112,8 +112,8 @@ void __init paging_init(void)
 	init_mm.pgd = swapper_pg_dir;
 	S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
 #ifdef CONFIG_64BIT
-	S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
-	pgd_type = _REGION3_ENTRY_EMPTY;
+	S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
+	pgd_type = _REGION2_ENTRY_EMPTY;
 #else
 	S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
 	pgd_type = _SEGMENT_ENTRY_EMPTY;
arch/s390/mm/vmem.c
@@ -69,7 +69,19 @@ static void __ref *vmem_alloc_pages(unsigned int order)
 	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
 }
 
-#define vmem_pud_alloc()	({ BUG(); ((pud_t *) NULL); })
+static inline pud_t *vmem_pud_alloc(void)
+{
+	pud_t *pud = NULL;
+
+#ifdef CONFIG_64BIT
+	pud = vmem_alloc_pages(2);
+	if (!pud)
+		return NULL;
+	pud_val(*pud) = _REGION3_ENTRY_EMPTY;
+	memcpy(pud + 1, pud, (PTRS_PER_PUD - 1) * sizeof(pud_t));
+#endif
+	return pud;
+}
 
 static inline pmd_t *vmem_pmd_alloc(void)
 {
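Aside (not part of the patch): the order-2 allocation in the new vmem_pud_alloc() is sized to hold exactly one pud table. A minimal sketch of that arithmetic, assuming 4 KB pages and 8-byte table entries as on 64-bit s390:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE    4096UL   /* assumed 4 KB pages */
#define ENTRY_SIZE   8UL      /* assumed sizeof(pud_t) on 64-bit */
#define PTRS_PER_PUD 2048UL   /* from the pgtable.h hunks below */

int main(void)
{
	/* vmem_alloc_pages(2) returns an order-2 block: 1 << 2 pages */
	unsigned long bytes = (1UL << 2) * PAGE_SIZE;

	/* exactly one full pud table: 2048 entries of 8 bytes = 16 KB */
	assert(bytes == PTRS_PER_PUD * ENTRY_SIZE);
	printf("pud table size: %lu bytes\n", bytes);
	return 0;
}

The memcpy() in the new function then copies the initialized first entry (_REGION3_ENTRY_EMPTY) into the remaining PTRS_PER_PUD - 1 slots.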
include/asm-s390/elf.h
@@ -138,14 +138,7 @@ typedef s390_regs elf_gregset_t;
    use of this is to invoke "./ld.so someprog" to test out a new version of
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk. */
 
-#ifndef __s390x__
-#define ELF_ET_DYN_BASE         ((TASK_SIZE & 0x80000000) \
-                                ? TASK_SIZE / 3 * 2 \
-                                : 2 * TASK_SIZE / 3)
-#else /* __s390x__ */
-#define ELF_ET_DYN_BASE         (TASK_SIZE / 3 * 2)
-#endif /* __s390x__ */
 
 /* Wow, the "main" arch needs arch dependent functions too.. :) */
include/asm-s390/mmu_context.h
@@ -18,7 +18,7 @@ static inline int init_new_context(struct task_struct *tsk,
 {
 	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
 #ifdef CONFIG_64BIT
-	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
+	mm->context.asce_bits |= _ASCE_TYPE_REGION2;
 #endif
 	mm->context.noexec = s390_noexec;
 	return 0;
include/asm-s390/pgalloc.h
@@ -73,11 +73,17 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
-	return _REGION3_ENTRY_EMPTY;
+	return _REGION2_ENTRY_EMPTY;
 }
 
-#define pud_alloc_one(mm,address)	({ BUG(); ((pud_t *)2); })
-#define pud_free(mm, x)			do { } while (0)
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
+	if (table)
+		crst_table_init(table, _REGION3_ENTRY_EMPTY);
+	return (pud_t *) table;
+}
+#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
@@ -88,8 +94,21 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 }
 #define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
 
-#define pgd_populate(mm, pgd, pud)		BUG()
-#define pgd_populate_kernel(mm, pgd, pud)	BUG()
+static inline void pgd_populate_kernel(struct mm_struct *mm,
+				       pgd_t *pgd, pud_t *pud)
+{
+	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
+}
+
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+	pgd_t *shadow_pgd = get_shadow_table(pgd);
+	pud_t *shadow_pud = get_shadow_table(pud);
+
+	if (shadow_pgd && shadow_pud)
+		pgd_populate_kernel(mm, shadow_pgd, shadow_pud);
+	pgd_populate_kernel(mm, pgd, pud);
+}
 
 static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
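Aside (not part of the patch): pgd_populate_kernel() above and pgd_deref()/pud_offset() in the pgtable.h hunks below are two halves of one encoding: the pgd entry is the physical origin of the pud table or'ed with region-second type bits, and dereferencing masks the origin back out. The sketch below uses placeholder bit values, not the real _REGION2_ENTRY and _REGION_ENTRY_ORIGIN definitions.

#include <stdio.h>

/* Placeholder values, for illustration only. */
#define ORIGIN_MASK  (~0xfffUL)  /* stands in for _REGION_ENTRY_ORIGIN */
#define REGION2_BITS 0x00cUL     /* stands in for _REGION2_ENTRY */

/* Mirrors pgd_populate_kernel(): entry = type bits | table origin. */
static unsigned long populate(unsigned long pud_origin)
{
	return REGION2_BITS | pud_origin;
}

/* Mirrors pgd_deref(): mask the table origin back out of the entry. */
static unsigned long deref(unsigned long entry)
{
	return entry & ORIGIN_MASK;
}

int main(void)
{
	unsigned long pud_origin = 0x12345000UL; /* hypothetical, aligned */
	unsigned long entry = populate(pud_origin);

	printf("entry=%#lx origin=%#lx\n", entry, deref(entry));
	return deref(entry) == pud_origin ? 0 : 1;
}

The second variant, pgd_populate(), additionally writes the shadow tables used by the s390 noexec emulation whenever get_shadow_table() finds them.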
include/asm-s390/pgtable.h
@@ -63,7 +63,7 @@ extern char empty_zero_page[PAGE_SIZE];
 #else /* __s390x__ */
 # define PMD_SHIFT	20
 # define PUD_SHIFT	31
-# define PGDIR_SHIFT	31
+# define PGDIR_SHIFT	42
 #endif /* __s390x__ */
 
 #define PMD_SIZE        (1UL << PMD_SHIFT)
@@ -82,10 +82,11 @@ extern char empty_zero_page[PAGE_SIZE];
 #define PTRS_PER_PTE	256
 #ifndef __s390x__
 #define PTRS_PER_PMD	1
+#define PTRS_PER_PUD	1
 #else /* __s390x__ */
 #define PTRS_PER_PMD	2048
+#define PTRS_PER_PUD	2048
 #endif /* __s390x__ */
-#define PTRS_PER_PUD	1
 #define PTRS_PER_PGD	2048
 
 #define FIRST_USER_ADDRESS  0
@@ -418,9 +419,23 @@ static inline int pud_bad(pud_t pud) { return 0; }
 
 #else /* __s390x__ */
 
-static inline int pgd_present(pgd_t pgd) { return 1; }
-static inline int pgd_none(pgd_t pgd)	 { return 0; }
-static inline int pgd_bad(pgd_t pgd)	 { return 0; }
+static inline int pgd_present(pgd_t pgd)
+{
+	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
+}
+
+static inline int pgd_none(pgd_t pgd)
+{
+	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+	unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
+			     ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
+	return (pgd_val(pgd) & mask) != 0;
+}
 
 static inline int pud_present(pud_t pud)
 {
@@ -434,8 +449,10 @@ static inline int pud_none(pud_t pud)
 
 static inline int pud_bad(pud_t pud)
 {
-	unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV;
-	return (pud_val(pud) & mask) != _REGION3_ENTRY;
+	unsigned long mask = ~_REGION_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
+			     ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
+	return (pud_val(pud) & mask) != 0;
 }
 
 #endif /* __s390x__ */
@@ -516,7 +533,19 @@ static inline int pte_young(pte_t pte)
 
 #else /* __s390x__ */
 
-#define pgd_clear(pgd)		do { } while (0)
+static inline void pgd_clear_kernel(pgd_t *pgd)
+{
+	pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
+}
+
+static inline void pgd_clear(pgd_t *pgd)
+{
+	pgd_t *shadow = get_shadow_table(pgd);
+
+	pgd_clear_kernel(pgd);
+	if (shadow)
+		pgd_clear_kernel(shadow);
+}
 
 static inline void pud_clear_kernel(pud_t *pud)
 {
@@ -808,9 +837,13 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
 #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
-#define pgd_deref(pgd) ({ BUG(); 0UL; })
+#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
 
-#define pud_offset(pgd, address) ((pud_t *) pgd)
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+	pud_t *pud = (pud_t *) pgd_deref(*pgd);
+	return pud + pud_index(address);
+}
 
 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 {
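Quick consistency check (not part of the patch) of the 64-bit layout these hunks define, assuming 4 KB pages (PAGE_SHIFT = 12): each level's entry count times the span of the level below reproduces the next shift, and the 2048-entry pgd reaches 2^53 bytes, matching the new 64-bit TASK_SIZE (1UL << 53) in include/asm-s390/processor.h below. A minimal sketch:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT  12   /* assumed 4 KB pages */
#define PMD_SHIFT   20   /* shifts from the hunks above */
#define PUD_SHIFT   31
#define PGDIR_SHIFT 42

int main(void)
{
	unsigned long pte_span = 1UL << PAGE_SHIFT;   /* 4 KB mapped per pte */
	unsigned long pmd_span = 1UL << PMD_SHIFT;    /* 1 MB per pmd entry */
	unsigned long pud_span = 1UL << PUD_SHIFT;    /* 2 GB per pud entry */
	unsigned long pgd_span = 1UL << PGDIR_SHIFT;  /* 4 TB per pgd entry */

	assert(256 * pte_span == pmd_span);    /* PTRS_PER_PTE = 256  */
	assert(2048 * pmd_span == pud_span);   /* PTRS_PER_PMD = 2048 */
	assert(2048 * pud_span == pgd_span);   /* PTRS_PER_PUD = 2048 */
	assert(2048 * pgd_span == 1UL << 53);  /* PTRS_PER_PGD = 2048 */

	printf("four levels cover 2^53 bytes of address space\n");
	return 0;
}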
include/asm-s390/processor.h
@@ -64,24 +64,28 @@ extern int get_cpu_capability(unsigned int *);
  */
 #ifndef __s390x__
 
-# define TASK_SIZE		(0x80000000UL)
-# define TASK_UNMAPPED_BASE	(TASK_SIZE / 2)
-# define DEFAULT_TASK_SIZE	(0x80000000UL)
+#define TASK_SIZE		(1UL << 31)
+#define TASK_UNMAPPED_BASE	(1UL << 30)
 
 #else /* __s390x__ */
 
-# define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_31BIT) ? \
-					(0x80000000UL) : (0x40000000000UL))
-# define TASK_SIZE		TASK_SIZE_OF(current)
-# define TASK_UNMAPPED_BASE	(TASK_SIZE / 2)
-# define DEFAULT_TASK_SIZE	(0x40000000000UL)
+#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_31BIT) ? \
+					(1UL << 31) : (1UL << 53))
+#define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_31BIT) ? \
+					(1UL << 30) : (1UL << 41))
+#define TASK_SIZE		TASK_SIZE_OF(current)
 
 #endif /* __s390x__ */
 
 #ifdef __KERNEL__
 
-#define STACK_TOP		TASK_SIZE
-#define STACK_TOP_MAX		DEFAULT_TASK_SIZE
+#ifndef __s390x__
+#define STACK_TOP		(1UL << 31)
+#else /* __s390x__ */
+#define STACK_TOP		(1UL << (test_thread_flag(TIF_31BIT) ? 31:53))
+#endif /* __s390x__ */
+#define STACK_TOP_MAX		STACK_TOP
 
 #endif
include/asm-s390/tlb.h
@@ -38,7 +38,7 @@ struct mmu_gather {
 	struct mm_struct *mm;
 	unsigned int fullmm;
 	unsigned int nr_ptes;
-	unsigned int nr_pmds;
+	unsigned int nr_pxds;
 	void *array[TLB_NR_PTRS];
 };
 
@@ -53,7 +53,7 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
 	tlb->fullmm = full_mm_flush || (num_online_cpus() == 1) ||
 		(atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm);
 	tlb->nr_ptes = 0;
-	tlb->nr_pmds = TLB_NR_PTRS;
+	tlb->nr_pxds = TLB_NR_PTRS;
 	if (tlb->fullmm)
 		__tlb_flush_mm(mm);
 	return tlb;
@@ -62,12 +62,13 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
 static inline void tlb_flush_mmu(struct mmu_gather *tlb,
 				 unsigned long start, unsigned long end)
 {
-	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pmds < TLB_NR_PTRS))
+	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS))
 		__tlb_flush_mm(tlb->mm);
 	while (tlb->nr_ptes > 0)
 		pte_free(tlb->mm, tlb->array[--tlb->nr_ptes]);
-	while (tlb->nr_pmds < TLB_NR_PTRS)
-		pmd_free(tlb->mm, (pmd_t *) tlb->array[tlb->nr_pmds++]);
+	while (tlb->nr_pxds < TLB_NR_PTRS)
+		/* pgd_free frees the pointer as region or segment table */
+		pgd_free(tlb->mm, tlb->array[tlb->nr_pxds++]);
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
@@ -99,7 +100,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
 {
 	if (!tlb->fullmm) {
 		tlb->array[tlb->nr_ptes++] = pte;
-		if (tlb->nr_ptes >= tlb->nr_pmds)
+		if (tlb->nr_ptes >= tlb->nr_pxds)
 			tlb_flush_mmu(tlb, 0, 0);
 	} else
 		pte_free(tlb->mm, pte);
@@ -113,15 +114,29 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
 #ifdef __s390x__
 	if (!tlb->fullmm) {
-		tlb->array[--tlb->nr_pmds] = (struct page *) pmd;
-		if (tlb->nr_ptes >= tlb->nr_pmds)
+		tlb->array[--tlb->nr_pxds] = pmd;
+		if (tlb->nr_ptes >= tlb->nr_pxds)
 			tlb_flush_mmu(tlb, 0, 0);
 	} else
 		pmd_free(tlb->mm, pmd);
 #endif
 }
 
-#define pud_free_tlb(tlb, pud)			do { } while (0)
+/*
+ * pud_free_tlb frees a pud table and clears the CRSTE for the
+ * region third table entry from the tlb.
+ */
+static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
+{
+#ifdef __s390x__
+	if (!tlb->fullmm) {
+		tlb->array[--tlb->nr_pxds] = pud;
+		if (tlb->nr_ptes >= tlb->nr_pxds)
+			tlb_flush_mmu(tlb, 0, 0);
+	} else
+		pud_free(tlb->mm, pud);
+#endif
+}
 
 #define tlb_start_vma(tlb, vma)			do { } while (0)
 #define tlb_end_vma(tlb, vma)			do { } while (0)
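Aside (not part of the patch): the nr_pmds to nr_pxds rename reflects that the same mmu_gather array now collects deferred tables of more than one level from its high end. In the patched tlb.h, pte pointers are queued from index 0 upwards (nr_ptes), pmd and pud pointers are queued from TLB_NR_PTRS downwards (nr_pxds), and tlb_flush_mmu() is forced when the two indices meet. A minimal standalone sketch of that two-ended bookkeeping, with simplified types and a hypothetical capacity:

#include <stdio.h>

#define TLB_NR_PTRS 8   /* hypothetical capacity, for illustration only */

struct gather {
	unsigned int nr_ptes;   /* grows from 0 */
	unsigned int nr_pxds;   /* shrinks from TLB_NR_PTRS */
	void *array[TLB_NR_PTRS];
};

static void flush(struct gather *g)
{
	printf("flush: %u ptes, %u pmds/puds\n",
	       g->nr_ptes, TLB_NR_PTRS - g->nr_pxds);
	g->nr_ptes = 0;
	g->nr_pxds = TLB_NR_PTRS;
}

/* pte_free_tlb() stores at the low end ... */
static void defer_pte(struct gather *g, void *pte)
{
	g->array[g->nr_ptes++] = pte;
	if (g->nr_ptes >= g->nr_pxds)
		flush(g);
}

/* ... pmd_free_tlb()/pud_free_tlb() store at the high end. */
static void defer_pxd(struct gather *g, void *pxd)
{
	g->array[--g->nr_pxds] = pxd;
	if (g->nr_ptes >= g->nr_pxds)
		flush(g);
}

int main(void)
{
	struct gather g = { .nr_ptes = 0, .nr_pxds = TLB_NR_PTRS };
	int dummy[16];
	int i;

	for (i = 0; i < 6; i++)
		defer_pte(&g, &dummy[i]);
	for (i = 6; i < 12; i++)
		defer_pxd(&g, &dummy[i]);
	flush(&g);  /* final flush, as tlb_flush_mmu() would do */
	return 0;
}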