openanolis / cloud-kernel

Commit dcc1e8dd
Authored by David S. Miller on Mar 22, 2006; committed by David S. Miller on Mar 22, 2006.
[SPARC64]: Add a secondary TSB for hugepage mappings.
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 14778d90
Showing 13 changed files with 462 additions and 201 deletions (+462 −201).
arch/sparc64/Kconfig                  +2   -2
arch/sparc64/kernel/sun4v_tlb_miss.S  +22  -17
arch/sparc64/kernel/traps.c           +20  -1
arch/sparc64/kernel/tsb.S             +160 -50
arch/sparc64/mm/fault.c               +12  -3
arch/sparc64/mm/hugetlbpage.c         +19  -9
arch/sparc64/mm/init.c                +19  -2
arch/sparc64/mm/tsb.c                 +147 -87
include/asm-sparc64/cpudata.h         +4   -1
include/asm-sparc64/mmu.h             +25  -4
include/asm-sparc64/mmu_context.h     +13  -8
include/asm-sparc64/page.h            +17  -17
include/asm-sparc64/pgtable.h         +2   -0
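The shape of the change, restated: mm_context_t stops carrying a single TSB and instead carries an array of per-page-size TSB configurations, indexed by MM_TSB_BASE and (when CONFIG_HUGETLB_PAGE is set) MM_TSB_HUGE. A rough C sketch of that layout and of how a fault path selects a block follows; names ending in _sketch are invented for illustration and are not kernel code.

/* Illustrative sketch of the structure this commit introduces: each
 * address space carries one TSB configuration per page-size class,
 * and fault handling picks the block by index.
 */
struct tsb_config_sketch {
	void          *tsb;            /* the TSB table itself */
	unsigned long  tsb_rss_limit;  /* grow threshold */
	unsigned long  tsb_nentries;   /* entries in the table */
};

enum { MM_TSB_BASE_IDX = 0, MM_TSB_HUGE_IDX = 1, MM_NUM_SKETCH = 2 };

struct mm_context_sketch {
	unsigned long             huge_pte_count;
	struct tsb_config_sketch  tsb_block[MM_NUM_SKETCH];
};

/* On a fault, consult the huge-page TSB only when it exists and the
 * PTE carries the huge-page size bits; otherwise use the base TSB.
 */
static struct tsb_config_sketch *
pick_tsb(struct mm_context_sketch *ctx, int pte_is_huge)
{
	if (pte_is_huge && ctx->tsb_block[MM_TSB_HUGE_IDX].tsb)
		return &ctx->tsb_block[MM_TSB_HUGE_IDX];
	return &ctx->tsb_block[MM_TSB_BASE_IDX];
}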
arch/sparc64/Kconfig

@@ -175,11 +175,11 @@ config HUGETLB_PAGE_SIZE_4MB
 	bool "4MB"
 
 config HUGETLB_PAGE_SIZE_512K
-	depends on !SPARC64_PAGE_SIZE_4MB
+	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
 	bool "512K"
 
 config HUGETLB_PAGE_SIZE_64K
-	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
+	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB && !SPARC64_PAGE_SIZE_64K
 	bool "64K"
 
 endchoice
arch/sparc64/kernel/sun4v_tlb_miss.S

@@ -29,15 +29,15 @@
  *
  *	index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
  *	tsb_base = tsb_reg & ~0x7UL;
- *	tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
+ *	tsb_index = ((vaddr >> HASH_SHIFT) & tsb_mask);
  *	tsb_ptr = tsb_base + (tsb_index * 16);
  */
-#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, TMP1, TMP2)	\
+#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, HASH_SHIFT, TMP1, TMP2) \
 	and	TSB_PTR, 0x7, TMP1;			\
 	mov	512, TMP2;				\
 	andn	TSB_PTR, 0x7, TSB_PTR;			\
 	sllx	TMP2, TMP1, TMP2;			\
-	srlx	VADDR, PAGE_SHIFT, TMP1;		\
+	srlx	VADDR, HASH_SHIFT, TMP1;		\
 	sub	TMP2, 1, TMP2;				\
 	and	TMP1, TMP2, TMP1;			\
 	sllx	TMP1, 4, TMP1;				\
@@ -53,7 +53,7 @@ sun4v_itlb_miss:
 	LOAD_ITLB_INFO(%g2, %g4, %g5)
 	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v)
-	COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
 
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
 	ldda	[%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -99,7 +99,7 @@ sun4v_dtlb_miss:
 	LOAD_DTLB_INFO(%g2, %g4, %g5)
 	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v)
-	COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
 
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
 	ldda	[%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -171,21 +171,26 @@ sun4v_dtsb_miss:
 	/* fallthrough */
 
 	/* Create TSB pointer into %g1.  This is something like:
 	 *
 	 *	index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
 	 *	tsb_base = tsb_reg & ~0x7UL;
 	 *	tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
 	 *	tsb_ptr = tsb_base + (tsb_index * 16);
 	 */
 sun4v_tsb_miss_common:
-	COMPUTE_TSB_PTR(%g1, %g4, %g5, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g5, %g7)
 
 	/* Branch directly to page table lookup.  We have SCRATCHPAD_MMU_MISS
 	 * still in %g2, so it's quite trivial to get at the PGD PHYS value
 	 * so we can preload it into %g7.
 	 */
 	sub	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+#ifdef CONFIG_HUGETLB_PAGE
+	mov	SCRATCHPAD_UTSBREG2, %g5
+	ldxa	[%g5] ASI_SCRATCHPAD, %g5
+	cmp	%g5, -1
+	be,pt	%xcc, 80f
+	 nop
+	COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)
+
+	/* That clobbered %g2, reload it.  */
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	sub	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+80:	stx	%g5, [%g2 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+#endif
+
 	ba,pt	%xcc, tsb_miss_page_table_walk_sun4v_fastpath
 	 ldx	[%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
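The pseudo-code comment above is the whole story of the macro change: the hash shift becomes a parameter, so the same pointer computation serves both the PAGE_SIZE TSB and the HPAGE_SIZE TSB. A C rendering of that arithmetic, as a sketch of the comment rather than the kernel's implementation:

/* tsb_reg packs the TSB base address with a 3-bit size field;
 * hash_shift is PAGE_SHIFT for the base TSB and HPAGE_SHIFT for the
 * huge-page TSB.  Each TSB entry is a 16-byte {tag, pte} pair.
 */
static unsigned long compute_tsb_ptr(unsigned long tsb_reg,
				     unsigned long vaddr,
				     unsigned long hash_shift)
{
	unsigned long index_mask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
	unsigned long tsb_base   = tsb_reg & ~0x7UL;
	unsigned long tsb_index  = (vaddr >> hash_shift) & index_mask;

	return tsb_base + (tsb_index * 16);
}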
arch/sparc64/kernel/traps.c

@@ -2482,6 +2482,7 @@ void init_cur_cpu_trap(struct thread_info *t)
 
 extern void thread_info_offsets_are_bolixed_dave(void);
 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
+extern void tsb_config_offsets_are_bolixed_dave(void);
 
 /* Only invoked on boot processor. */
 void __init trap_init(void)
@@ -2535,9 +2536,27 @@ void __init trap_init(void)
 	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
 	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
 	    (TRAP_PER_CPU_CPU_LIST_PA !=
-	     offsetof(struct trap_per_cpu, cpu_list_pa)))
+	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+	    (TRAP_PER_CPU_TSB_HUGE !=
+	     offsetof(struct trap_per_cpu, tsb_huge)) ||
+	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+	     offsetof(struct trap_per_cpu, tsb_huge_temp)))
 		trap_per_cpu_offsets_are_bolixed_dave();
 
+	if ((TSB_CONFIG_TSB !=
+	     offsetof(struct tsb_config, tsb)) ||
+	    (TSB_CONFIG_RSS_LIMIT !=
+	     offsetof(struct tsb_config, tsb_rss_limit)) ||
+	    (TSB_CONFIG_NENTRIES !=
+	     offsetof(struct tsb_config, tsb_nentries)) ||
+	    (TSB_CONFIG_REG_VAL !=
+	     offsetof(struct tsb_config, tsb_reg_val)) ||
+	    (TSB_CONFIG_MAP_VADDR !=
+	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
+	    (TSB_CONFIG_MAP_PTE !=
+	     offsetof(struct tsb_config, tsb_map_pte)))
+		tsb_config_offsets_are_bolixed_dave();
+
 	/* Attach to the address space of init_task.  On SMP we
 	 * do this in smp.c:smp_callin for other cpus.
 	 */
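These checks extend an existing sparc64 convention: assembly addresses trap_per_cpu and tsb_config fields through hand-maintained byte offsets, and trap_init() compares each constant against offsetof(), calling a deliberately undefined *_are_bolixed_dave() function on mismatch so the kernel fails to link. A minimal sketch of the pattern, with simplified stand-in names:

/* If the constants match, the compile-time-constant condition folds
 * to false and the call is eliminated; if not, the undefined symbol
 * breaks the link.  Struct and constants here are stand-ins.
 */
#include <stddef.h>

struct tsb_config_example {
	void          *tsb;            /* expected at offset 0x00 */
	unsigned long  tsb_rss_limit;  /* expected at 0x08 on 64-bit */
};

#define TSB_CONFIG_TSB_EXAMPLE        0x00
#define TSB_CONFIG_RSS_LIMIT_EXAMPLE  0x08

extern void example_offsets_are_bolixed(void);  /* never defined */

static void check_offsets(void)
{
	if (TSB_CONFIG_TSB_EXAMPLE !=
	    offsetof(struct tsb_config_example, tsb) ||
	    TSB_CONFIG_RSS_LIMIT_EXAMPLE !=
	    offsetof(struct tsb_config_example, tsb_rss_limit))
		example_offsets_are_bolixed();
}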
arch/sparc64/kernel/tsb.S

@@ -3,8 +3,13 @@
  * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
  */
 
+#include <linux/config.h>
+
 #include <asm/tsb.h>
 #include <asm/hypervisor.h>
+#include <asm/page.h>
+#include <asm/cpudata.h>
+#include <asm/mmu.h>
 
 	.text
 	.align	32
@@ -34,34 +39,124 @@ tsb_miss_itlb:
 	 ldxa		[%g4] ASI_IMMU, %g4
 
 	/* At this point we have:
-	 * %g1 --	TSB entry address
+	 * %g1 --	PAGE_SIZE TSB entry address
 	 * %g3 --	FAULT_CODE_{D,I}TLB
 	 * %g4 --	missing virtual address
 	 * %g6 --	TAG TARGET (vaddr >> 22)
 	 */
 tsb_miss_page_table_walk:
-	TRAP_LOAD_PGD_PHYS(%g7, %g5)
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g5)
 
-	/* And now we have the PGD base physical address in %g7.  */
-tsb_miss_page_table_walk_sun4v_fastpath:
-	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
+	/* Before committing to a full page table walk,
+	 * check the huge page TSB.
+	 */
+#ifdef CONFIG_HUGETLB_PAGE
+
+661:	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
+	nop
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	mov		SCRATCHPAD_UTSBREG2, %g5
+	ldxa		[%g5] ASI_SCRATCHPAD, %g5
+	.previous
+
+	cmp		%g5, -1
+	be,pt		%xcc, 80f
+	 nop
+
+	/* We need an aligned pair of registers containing 2 values
+	 * which can be easily rematerialized.  %g6 and %g7 foot the
+	 * bill just nicely.  We'll save %g6 away into %g2 for the
+	 * huge page TSB TAG comparison.
+	 *
+	 * Perform a huge page TSB lookup.
+	 */
+	mov		%g6, %g2
+	and		%g5, 0x7, %g6
+	mov		512, %g7
+	andn		%g5, 0x7, %g5
+	sllx		%g7, %g6, %g7
+	srlx		%g4, HPAGE_SHIFT, %g6
+	sub		%g7, 1, %g7
+	and		%g6, %g7, %g6
+	sllx		%g6, 4, %g6
+	add		%g5, %g6, %g5
+
+	TSB_LOAD_QUAD(%g5, %g6)
+	cmp		%g6, %g2
+	be,a,pt		%xcc, tsb_tlb_reload
+	 mov		%g7, %g5
+
+	/* No match, remember the huge page TSB entry address,
+	 * and restore %g6 and %g7.
+	 */
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
+	srlx		%g4, 22, %g6
+80:	stx		%g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+
+#endif
+
+	ldx		[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7
 
 	/* At this point we have:
 	 * %g1 --	TSB entry address
 	 * %g3 --	FAULT_CODE_{D,I}TLB
-	 * %g5 --	physical address of PTE in Linux page tables
+	 * %g4 --	missing virtual address
 	 * %g6 --	TAG TARGET (vaddr >> 22)
+	 * %g7 --	page table physical address
+	 *
+	 * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE
+	 * TSB both lack a matching entry.
 	 */
-tsb_reload:
-	TSB_LOCK_TAG(%g1, %g2, %g7)
+tsb_miss_page_table_walk_sun4v_fastpath:
+	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
-	mov		1, %g7
-	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
-	brgez,a,pn	%g5, tsb_do_fault
-	 TSB_STORE(%g1, %g7)
+	brgez,pn	%g5, tsb_do_fault
+	 nop
+
+#ifdef CONFIG_HUGETLB_PAGE
+661:	sethi		%uhi(_PAGE_SZALL_4U), %g7
+	sllx		%g7, 32, %g7
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	mov		_PAGE_SZALL_4V, %g7
+	nop
+	.previous
+
+	and		%g5, %g7, %g2
+
+661:	sethi		%uhi(_PAGE_SZHUGE_4U), %g7
+	sllx		%g7, 32, %g7
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	mov		_PAGE_SZHUGE_4V, %g7
+	nop
+	.previous
+
+	cmp		%g2, %g7
+	bne,pt		%xcc, 60f
+	 nop
+
+	/* It is a huge page, use huge page TSB entry address we
+	 * calculated above.
+	 */
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
+	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
+	cmp		%g2, -1
+	movne		%xcc, %g2, %g1
+60:
+#endif
+
+	/* At this point we have:
+	 * %g1 --	TSB entry address
+	 * %g3 --	FAULT_CODE_{D,I}TLB
+	 * %g5 --	valid PTE
+	 * %g6 --	TAG TARGET (vaddr >> 22)
+	 */
+tsb_reload:
+	TSB_LOCK_TAG(%g1, %g2, %g7)
 	TSB_WRITE(%g1, %g5, %g6)
 
 	/* Finally, load TLB and return from trap.  */
@@ -240,10 +335,9 @@ tsb_flush:
 	 * schedule() time.
 	 *
 	 * %o0: page table physical address
-	 * %o1:	TSB register value
-	 * %o2:	TSB virtual address
-	 * %o3:	TSB mapping locked PTE
-	 * %o4:	Hypervisor TSB descriptor physical address
+	 * %o1:	TSB base config pointer
+	 * %o2:	TSB huge config pointer, or NULL if none
+	 * %o3:	Hypervisor TSB descriptor physical address
 	 *
 	 * We have to run this whole thing with interrupts
 	 * disabled so that the current cpu doesn't change
@@ -253,63 +347,79 @@ tsb_flush:
 	.globl	__tsb_context_switch
 	.type	__tsb_context_switch,#function
 __tsb_context_switch:
-	rdpr	%pstate, %o5
-	wrpr	%o5, PSTATE_IE, %pstate
+	rdpr	%pstate, %g1
+	wrpr	%g1, PSTATE_IE, %pstate
+
+	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
 
-	ldub	[%g6 + TI_CPU], %g1
-	sethi	%hi(trap_block), %g2
-	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1
-	or	%g2, %lo(trap_block), %g2
-	add	%g2, %g1, %g2
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
 
-	sethi	%hi(tlb_type), %g1
-	lduw	[%g1 + %lo(tlb_type)], %g1
-	cmp	%g1, 3
-	bne,pt	%icc, 1f
+	ldx	[%o1 + TSB_CONFIG_REG_VAL], %o0
+	brz,pt	%o2, 1f
+	 mov	-1, %g3
+
+	ldx	[%o2 + TSB_CONFIG_REG_VAL], %g3
+
+1:	stx	%g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]
+
+	sethi	%hi(tlb_type), %g2
+	lduw	[%g2 + %lo(tlb_type)], %g2
+	cmp	%g2, 3
+	bne,pt	%icc, 50f
 	 nop
 
 	/* Hypervisor TSB switch.  */
-	mov	SCRATCHPAD_UTSBREG1, %g1
-	stxa	%o1, [%g1] ASI_SCRATCHPAD
-	mov	-1, %g2
-	mov	SCRATCHPAD_UTSBREG2, %g1
-	stxa	%g2, [%g1] ASI_SCRATCHPAD
-
-	/* Save away %o5's %pstate, we have to use %o5 for
-	 * the hypervisor call.
-	 */
-	mov	%o5, %g1
+	mov	SCRATCHPAD_UTSBREG1, %o5
+	stxa	%o0, [%o5] ASI_SCRATCHPAD
+	mov	SCRATCHPAD_UTSBREG2, %o5
+	stxa	%g3, [%o5] ASI_SCRATCHPAD
+
+	mov	2, %o0
+	cmp	%g3, -1
+	move	%xcc, 1, %o0
 
 	mov	HV_FAST_MMU_TSB_CTXNON0, %o5
-	mov	1, %o0
-	mov	%o4, %o1
+	mov	%o3, %o1
 	ta	HV_FAST_TRAP
 
-	/* Finish up and restore %o5.  */
+	/* Finish up.  */
 	ba,pt	%xcc, 9f
-	 mov	%g1, %o5
+	 nop
 
 	/* SUN4U TSB switch.  */
-1:	mov	TSB_REG, %g1
-	stxa	%o1, [%g1] ASI_DMMU
+50:	mov	TSB_REG, %o5
+	stxa	%o0, [%o5] ASI_DMMU
 	membar	#Sync
-	stxa	%o1, [%g1] ASI_IMMU
+	stxa	%o0, [%o5] ASI_IMMU
 	membar	#Sync
 
-2:	brz	%o2, 9f
-	 nop
+2:	ldx	[%o1 + TSB_CONFIG_MAP_VADDR], %o4
+	brz	%o4, 9f
+	 ldx	[%o1 + TSB_CONFIG_MAP_PTE], %o5
 
 	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
-	mov	TLB_TAG_ACCESS, %g1
+	mov	TLB_TAG_ACCESS, %g3
 	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
-	stxa	%o2, [%g1] ASI_DMMU
+	stxa	%o4, [%g3] ASI_DMMU
 	membar	#Sync
 	sllx	%g2, 3, %g2
-	stxa	%o3, [%g2] ASI_DTLB_DATA_ACCESS
-	membar	#Sync
+	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
+
+	brz,pt	%o2, 9f
+	 nop
+
+	ldx	[%o2 + TSB_CONFIG_MAP_VADDR], %o4
+	ldx	[%o2 + TSB_CONFIG_MAP_PTE], %o5
+	mov	TLB_TAG_ACCESS, %g3
+	stxa	%o4, [%g3] ASI_DMMU
+	membar	#Sync
+	sub	%g2, (1 << 3), %g2
+	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
 9:
-	wrpr	%o5, %pstate
+	wrpr	%g1, %pstate
 
 	retl
 	 nop
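The CONFIG_HUGETLB_PAGE block added to the miss path above performs a second TSB probe keyed by HPAGE_SHIFT before falling back to the page-table walk, and stashes the missed slot in TRAP_PER_CPU_TSB_HUGE_TEMP for the later tsb_reload. A C restatement of that probe logic; a sketch with invented names, assuming the 16-byte {tag, pte} entry layout described in the comments and a 64-bit target where an address fits in an unsigned long:

struct tsb_entry_sketch {
	unsigned long tag;
	unsigned long pte;
};

/* Returns the matching entry, or 0 (NULL) with *miss_ent set to the
 * slot a later PTE insert should fill.
 */
static struct tsb_entry_sketch *
probe_huge_tsb(unsigned long tsb_reg, unsigned long vaddr,
	       unsigned long hpage_shift,
	       struct tsb_entry_sketch **miss_ent)
{
	unsigned long nmask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
	struct tsb_entry_sketch *base =
		(struct tsb_entry_sketch *)(tsb_reg & ~0x7UL);
	struct tsb_entry_sketch *ent =
		&base[(vaddr >> hpage_shift) & nmask];

	if (ent->tag == (vaddr >> 22))	/* TAG TARGET compare */
		return ent;		/* hit: reload TLB from ent->pte */
	*miss_ent = ent;		/* miss: remember for tsb_reload */
	return 0;
}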
arch/sparc64/mm/fault.c

@@ -410,9 +410,18 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	up_read(&mm->mmap_sem);
 
 	mm_rss = get_mm_rss(mm);
-	if (unlikely(mm_rss >= mm->context.tsb_rss_limit))
-		tsb_grow(mm, mm_rss);
+#ifdef CONFIG_HUGETLB_PAGE
+	mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
+#endif
+	if (unlikely(mm_rss >=
+		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
+		tsb_grow(mm, MM_TSB_BASE, mm_rss);
+#ifdef CONFIG_HUGETLB_PAGE
+	mm_rss = mm->context.huge_pte_count;
+	if (unlikely(mm_rss >=
+		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
+		tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+#endif
 	return;
 
 	/*
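The accounting change above keeps the two TSBs sized independently: get_mm_rss() counts every mapped base page, so the share contributed by huge pages (HPAGE_SIZE/PAGE_SIZE base pages per huge PTE) is subtracted before the base-TSB check, while the huge TSB is sized from huge_pte_count alone. A small sketch of the base-TSB check, assuming 8K base pages and 4MB huge pages:

static int base_tsb_needs_grow(unsigned long rss_pages,
			       unsigned long huge_pte_count,
			       unsigned long base_rss_limit)
{
	/* 4MB / 8KB = 512 base pages accounted per huge PTE */
	const unsigned long pages_per_huge = (4UL << 20) / (8UL << 10);

	return (rss_pages - huge_pte_count * pages_per_huge)
		>= base_rss_limit;
}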
arch/sparc64/mm/hugetlbpage.c

@@ -199,13 +199,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, addr);
-	if (pgd) {
-		pud = pud_offset(pgd, addr);
-		if (pud) {
-			pmd = pmd_alloc(mm, pud, addr);
-			if (pmd)
-				pte = pte_alloc_map(mm, pmd, addr);
-		}
+	pud = pud_alloc(mm, pgd, addr);
+	if (pud) {
+		pmd = pmd_alloc(mm, pud, addr);
+		if (pmd)
+			pte = pte_alloc_map(mm, pmd, addr);
 	}
 	return pte;
 }
@@ -231,13 +229,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
-#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
-
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
 	int i;
 
+	if (!pte_present(*ptep) && pte_present(entry))
+		mm->context.huge_pte_count++;
+
 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
 		set_pte_at(mm, addr, ptep, entry);
 		ptep++;
@@ -253,6 +252,8 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	int i;
 
 	entry = *ptep;
+	if (pte_present(entry))
+		mm->context.huge_pte_count--;
 
 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
 		pte_clear(mm, addr, ptep);
@@ -302,6 +303,15 @@ static void context_reload(void *__data)
 
 void hugetlb_prefault_arch_hook(struct mm_struct *mm)
 {
+	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+
+	if (likely(tp->tsb != NULL))
+		return;
+
+	tsb_grow(mm, MM_TSB_HUGE, 0);
+	tsb_context_switch(mm);
+	smp_tsb_sync(mm);
+
 	/* On UltraSPARC-III+ and later, configure the second half of
 	 * the Data-TLB for huge pages.
 	 */
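The counter maintained above gives do_sparc64_fault() a cheap huge-page RSS: set_huge_pte_at() increments it on a not-present-to-present transition, and huge_ptep_get_and_clear() decrements it when a present mapping is cleared. The same discipline in miniature (a sketch with stand-in types):

struct ctx_sketch { unsigned long huge_pte_count; };

static void account_set_huge(struct ctx_sketch *ctx,
			     int old_present, int new_present)
{
	if (!old_present && new_present)
		ctx->huge_pte_count++;	/* new huge mapping appears */
}

static void account_clear_huge(struct ctx_sketch *ctx, int was_present)
{
	if (was_present)
		ctx->huge_pte_count--;	/* huge mapping torn down */
}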
arch/sparc64/mm/init.c

@@ -283,6 +283,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	struct mm_struct *mm;
 	struct tsb *tsb;
 	unsigned long tag, flags;
+	unsigned long tsb_index, tsb_hash_shift;
 
 	if (tlb_type != hypervisor) {
 		unsigned long pfn = pte_pfn(pte);
@@ -312,10 +313,26 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	mm = vma->vm_mm;
 
+	tsb_index = MM_TSB_BASE;
+	tsb_hash_shift = PAGE_SHIFT;
+
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
-			       (mm->context.tsb_nentries - 1UL)];
+#ifdef CONFIG_HUGETLB_PAGE
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
+		if ((tlb_type == hypervisor &&
+		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+		    (tlb_type != hypervisor &&
+		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
+			tsb_index = MM_TSB_HUGE;
+			tsb_hash_shift = HPAGE_SHIFT;
+		}
+	}
+#endif
+
+	tsb = mm->context.tsb_block[tsb_index].tsb;
+	tsb += ((address >> tsb_hash_shift) &
+		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
 	tag = (address >> 22UL);
 	tsb_insert(tsb, tag, pte_val(pte));
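The new test in update_mmu_cache() routes a PTE to the huge TSB only when its size-field bits match the huge-page encoding exactly. As a standalone predicate for the sun4v case (a sketch; the SZHUGE value shown assumes the 4MB encoding implied by the _PAGE_SZ*_4V constants in the pgtable.h hunk below):

#define SZALL_4V_SKETCH   0x7UL	/* all pgsz bits, per pgtable.h hunk */
#define SZHUGE_4V_SKETCH  0x3UL	/* assumed 4MB encoding */

static int pte_is_huge_sketch(unsigned long pte_val)
{
	return (pte_val & SZALL_4V_SKETCH) == SZHUGE_4V_SKETCH;
}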
arch/sparc64/mm/tsb.c

@@ -15,9 +15,9 @@
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
-static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
+static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
 {
-	vaddr >>= PAGE_SHIFT;
+	vaddr >>= hash_shift;
 	return vaddr & (nentries - 1);
 }
 
@@ -36,7 +36,8 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	unsigned long v;
 
 	for (v = start; v < end; v += PAGE_SIZE) {
-		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
+		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
+					      KERNEL_TSB_NENTRIES);
 		struct tsb *ent = &swapper_tsb[hash];
 
 		if (tag_compare(ent->tag, v)) {
@@ -46,49 +47,91 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	}
 }
 
-void flush_tsb_user(struct mmu_gather *mp)
+static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
 {
-	struct mm_struct *mm = mp->mm;
-	unsigned long nentries, base, flags;
-	struct tsb *tsb;
-	int i;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-
-	tsb = mm->context.tsb;
-	nentries = mm->context.tsb_nentries;
-
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		base = __pa(tsb);
-	else
-		base = (unsigned long) tsb;
+	unsigned long i;
 
 	for (i = 0; i < mp->tlb_nr; i++) {
 		unsigned long v = mp->vaddrs[i];
 		unsigned long tag, ent, hash;
 
 		v &= ~0x1UL;
 
-		hash = tsb_hash(v, nentries);
-		ent = base + (hash * sizeof(struct tsb));
+		hash = tsb_hash(v, hash_shift, nentries);
+		ent = tsb + (hash * sizeof(struct tsb));
 		tag = (v >> 22UL);
 
 		tsb_flush(ent, tag);
 	}
+}
 
+void flush_tsb_user(struct mmu_gather *mp)
+{
+	struct mm_struct *mm = mp->mm;
+	unsigned long nentries, base, flags;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+		base = __pa(base);
+	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);
+
+#ifdef CONFIG_HUGETLB_PAGE
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
+	}
+#endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
-static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
+#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_512K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_512K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_4MB
+#else
+#error Broken base page size setting...
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
+#else
+#error Broken huge page size setting...
+#endif
+#endif
+
+static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
 {
 	unsigned long tsb_reg, base, tsb_paddr;
 	unsigned long page_sz, tte;
 
-	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);
+	mm->context.tsb_block[tsb_idx].tsb_nentries =
+		tsb_bytes / sizeof(struct tsb);
 
 	base = TSBMAP_BASE;
 	tte = pgprot_val(PAGE_KERNEL_LOCKED);
-	tsb_paddr = __pa(mm->context.tsb);
+	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
 	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
 
 	/* Use the smallest page size that can map the whole TSB
@@ -147,61 +190,49 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 		/* Physical mapping, no locked TLB entry for TSB.  */
 		tsb_reg |= tsb_paddr;
 
-		mm->context.tsb_reg_val = tsb_reg;
-		mm->context.tsb_map_vaddr = 0;
-		mm->context.tsb_map_pte = 0;
+		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
+		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
 	} else {
 		tsb_reg |= base;
 		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
 		tte |= (tsb_paddr & ~(page_sz - 1UL));
 
-		mm->context.tsb_reg_val = tsb_reg;
-		mm->context.tsb_map_vaddr = base;
-		mm->context.tsb_map_pte = tte;
+		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
+		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
 	}
 
 	/* Setup the Hypervisor TSB descriptor.  */
 	if (tlb_type == hypervisor) {
-		struct hv_tsb_descr *hp = &mm->context.tsb_descr;
+		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];
 
-		switch (PAGE_SIZE) {
-		case 8192:
-		default:
-			hp->pgsz_idx = HV_PGSZ_IDX_8K;
-			break;
-
-		case 64 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_64K;
-			break;
-
-		case 512 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_512K;
-			break;
-
-		case 4 * 1024 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
+		switch (tsb_idx) {
+		case MM_TSB_BASE:
+			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
 			break;
+#ifdef CONFIG_HUGETLB_PAGE
+		case MM_TSB_HUGE:
+			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
+			break;
+#endif
+		default:
+			BUG();
 		};
 		hp->assoc = 1;
 		hp->num_ttes = tsb_bytes / 16;
 		hp->ctx_idx = 0;
-		switch (PAGE_SIZE) {
-		case 8192:
-		default:
-			hp->pgsz_mask = HV_PGSZ_MASK_8K;
-			break;
-
-		case 64 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_64K;
-			break;
-
-		case 512 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_512K;
+		switch (tsb_idx) {
+		case MM_TSB_BASE:
+			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
 			break;
-
-		case 4 * 1024 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
+#ifdef CONFIG_HUGETLB_PAGE
+		case MM_TSB_HUGE:
+			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
 			break;
+#endif
+		default:
+			BUG();
 		};
 		hp->tsb_base = tsb_paddr;
 		hp->resv = 0;
@@ -241,11 +272,11 @@ void __init tsb_cache_init(void)
 	}
 }
 
-/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
- * do_sparc64_fault() invokes this routine to try and grow the TSB.
+/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
+ * do_sparc64_fault() invokes this routine to try and grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
- * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
+ * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
@@ -257,7 +288,7 @@ void __init tsb_cache_init(void)
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
-void tsb_grow(struct mm_struct *mm, unsigned long rss)
+void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 {
 	unsigned long max_tsb_size = 1 * 1024 * 1024;
 	unsigned long new_size, old_size, flags;
@@ -297,7 +328,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss)
 	 * down to a 0-order allocation and force no TSB
 	 * growing for this address space.
 	 */
-	if (mm->context.tsb == NULL && new_cache_index > 0) {
+	if (mm->context.tsb_block[tsb_index].tsb == NULL &&
+	    new_cache_index > 0) {
 		new_cache_index = 0;
 		new_size = 8192;
 		new_rss_limit = ~0UL;
@@ -307,8 +339,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss)
 	/* If we failed on a TSB grow, we are under serious
 	 * memory pressure so don't try to grow any more.
 	 */
-	if (mm->context.tsb != NULL)
-		mm->context.tsb_rss_limit = ~0UL;
+	if (mm->context.tsb_block[tsb_index].tsb != NULL)
+		mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
 	return;
 	}
@@ -339,23 +371,26 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss)
 	 */
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	old_tsb = mm->context.tsb;
-	old_cache_index = (mm->context.tsb_reg_val & 0x7UL);
-	old_size = mm->context.tsb_nentries * sizeof(struct tsb);
+	old_tsb = mm->context.tsb_block[tsb_index].tsb;
+	old_cache_index =
+		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
+	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
+		    sizeof(struct tsb));
 
 	/* Handle multiple threads trying to grow the TSB at the same time.
 	 * One will get in here first, and bump the size and the RSS limit.
 	 * The others will get in here next and hit this check.
 	 */
-	if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
+	if (unlikely(old_tsb &&
+		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
 		spin_unlock_irqrestore(&mm->context.lock, flags);
 
 		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
 		return;
 	}
 
-	mm->context.tsb_rss_limit = new_rss_limit;
+	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;
 
 	if (old_tsb) {
 		extern void copy_tsb(unsigned long old_tsb_base,
@@ -372,8 +407,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss)
 		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
 	}
 
-	mm->context.tsb = new_tsb;
-	setup_tsb_params(mm, new_size);
+	mm->context.tsb_block[tsb_index].tsb = new_tsb;
+	setup_tsb_params(mm, tsb_index, new_size);
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
@@ -394,40 +429,65 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss)
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+#ifdef CONFIG_HUGETLB_PAGE
+	unsigned long huge_pte_count;
+#endif
+	unsigned int i;
+
 	spin_lock_init(&mm->context.lock);
 
 	mm->context.sparc64_ctx_val = 0UL;
 
+#ifdef CONFIG_HUGETLB_PAGE
+	/* We reset it to zero because the fork() page copying
+	 * will re-increment the counters as the parent PTEs are
+	 * copied into the child address space.
+	 */
+	huge_pte_count = mm->context.huge_pte_count;
+	mm->context.huge_pte_count = 0;
+#endif
+
 	/* copy_mm() copies over the parent's mm_struct before calling
 	 * us, so we need to zero out the TSB pointer or else tsb_grow()
 	 * will be confused and think there is an older TSB to free up.
 	 */
-	mm->context.tsb = NULL;
+	for (i = 0; i < MM_NUM_TSBS; i++)
+		mm->context.tsb_block[i].tsb = NULL;
 
 	/* If this is fork, inherit the parent's TSB size.  We would
 	 * grow it to that size on the first page fault anyways.
 	 */
-	tsb_grow(mm, get_mm_rss(mm));
+	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
 
-	if (unlikely(!mm->context.tsb))
+#ifdef CONFIG_HUGETLB_PAGE
+	if (unlikely(huge_pte_count))
+		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
+#endif
+
+	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
 		return -ENOMEM;
 
 	return 0;
 }
 
-void destroy_context(struct mm_struct *mm)
+static void tsb_destroy_one(struct tsb_config *tp)
 {
-	unsigned long flags, cache_index;
+	unsigned long cache_index;
 
-	cache_index = (mm->context.tsb_reg_val & 0x7UL);
-	kmem_cache_free(tsb_caches[cache_index], mm->context.tsb);
+	if (!tp->tsb)
+		return;
+	cache_index = tp->tsb_reg_val & 0x7UL;
+	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
+	tp->tsb = NULL;
+	tp->tsb_reg_val = 0UL;
+}
 
-	/* We can remove these later, but for now it's useful
-	 * to catch any bogus post-destroy_context() references
-	 * to the TSB.
-	 */
-	mm->context.tsb = NULL;
-	mm->context.tsb_reg_val = 0UL;
+void destroy_context(struct mm_struct *mm)
+{
+	unsigned long flags, i;
+
+	for (i = 0; i < MM_NUM_TSBS; i++)
+		tsb_destroy_one(&mm->context.tsb_block[i]);
 
 	spin_lock_irqsave(&ctx_alloc_lock, flags);
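The comments above describe the growth policy this file keeps: each TSB entry is 16 bytes, growth triggers when RSS reaches 3/4 of capacity, and sizes double from 8K up to a 1MB cap. A sketch of the size selection only; the real tsb_grow() additionally handles allocation failure, the ~0UL "stop growing" limit, and concurrent growers:

static unsigned long pick_tsb_size(unsigned long rss)
{
	unsigned long size;

	for (size = 8192; size < 1024 * 1024; size <<= 1) {
		unsigned long nentries = size / 16;	/* 16 bytes/entry */

		/* This size suffices while RSS stays below 3/4 of
		 * the table's capacity.
		 */
		if (rss < nentries - (nentries >> 2))
			break;
	}
	return size;	/* capped at 1MB */
}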
include/asm-sparc64/cpudata.h

@@ -71,7 +71,8 @@ struct trap_per_cpu {
 /* Dcache line 7: Physical addresses of CPU send mondo block and CPU list.  */
 	unsigned long		cpu_mondo_block_pa;
 	unsigned long		cpu_list_pa;
-	unsigned long		__pad1[2];
+	unsigned long		tsb_huge;
+	unsigned long		tsb_huge_temp;
 
 /* Dcache line 8: Unused, needed to keep trap_block a power-of-2 in size.  */
 	unsigned long		__pad2[4];
@@ -116,6 +117,8 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 #define TRAP_PER_CPU_FAULT_INFO		0x40
 #define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA	0xc0
 #define TRAP_PER_CPU_CPU_LIST_PA	0xc8
+#define TRAP_PER_CPU_TSB_HUGE		0xd0
+#define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
 
 #define TRAP_BLOCK_SZ_SHIFT		8
include/asm-sparc64/mmu.h

@@ -90,18 +90,39 @@ extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte
 extern void tsb_flush(unsigned long ent, unsigned long tag);
 extern void tsb_init(struct tsb *tsb, unsigned long size);
 
-typedef struct {
-	spinlock_t		lock;
-	unsigned long		sparc64_ctx_val;
+struct tsb_config {
 	struct tsb		*tsb;
 	unsigned long		tsb_rss_limit;
 	unsigned long		tsb_nentries;
 	unsigned long		tsb_reg_val;
 	unsigned long		tsb_map_vaddr;
 	unsigned long		tsb_map_pte;
-	struct hv_tsb_descr	tsb_descr;
+};
+
+#define MM_TSB_BASE	0
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define MM_TSB_HUGE	1
+#define MM_NUM_TSBS	2
+#else
+#define MM_NUM_TSBS	1
+#endif
+
+typedef struct {
+	spinlock_t		lock;
+	unsigned long		sparc64_ctx_val;
+	unsigned long		huge_pte_count;
+	struct tsb_config	tsb_block[MM_NUM_TSBS];
+	struct hv_tsb_descr	tsb_descr[MM_NUM_TSBS];
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
 
+#define TSB_CONFIG_TSB		0x00
+#define TSB_CONFIG_RSS_LIMIT	0x08
+#define TSB_CONFIG_NENTRIES	0x10
+#define TSB_CONFIG_REG_VAL	0x18
+#define TSB_CONFIG_MAP_VADDR	0x20
+#define TSB_CONFIG_MAP_PTE	0x28
+
 #endif /* __MMU_H */
include/asm-sparc64/mmu_context.h

@@ -29,20 +29,25 @@ extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
 
 extern void __tsb_context_switch(unsigned long pgd_pa,
-				 unsigned long tsb_reg,
-				 unsigned long tsb_vaddr,
-				 unsigned long tsb_pte,
+				 struct tsb_config *tsb_base,
+				 struct tsb_config *tsb_huge,
 				 unsigned long tsb_descr_pa);
 
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
-	__tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val,
-			     mm->context.tsb_map_vaddr,
-			     mm->context.tsb_map_pte,
-			     __pa(&mm->context.tsb_descr));
+	__tsb_context_switch(__pa(mm->pgd),
+			     &mm->context.tsb_block[0],
+#ifdef CONFIG_HUGETLB_PAGE
+			     (mm->context.tsb_block[1].tsb ?
+			      &mm->context.tsb_block[1] :
+			      NULL)
+#else
+			     NULL
+#endif
+			     , __pa(&mm->context.tsb_descr[0]));
 }
 
-extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss);
+extern void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss);
 #ifdef CONFIG_SMP
 extern void smp_tsb_sync(struct mm_struct *mm);
 #else
include/asm-sparc64/page.h

@@ -30,6 +30,23 @@
 #ifdef __KERNEL__
 
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define HPAGE_SHIFT		22
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define HPAGE_SHIFT		19
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define HPAGE_SHIFT		16
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
+#define HPAGE_MASK		(~(HPAGE_SIZE - 1UL))
+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
+#define ARCH_HAS_HUGETLB_PREFAULT_HOOK
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+
 #ifndef __ASSEMBLY__
 
 extern void _clear_page(void *page);
@@ -90,23 +107,6 @@ typedef unsigned long pgprot_t;
 #endif /* (STRICT_MM_TYPECHECKS) */
 
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
-#define HPAGE_SHIFT		22
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-#define HPAGE_SHIFT		19
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define HPAGE_SHIFT		16
-#endif
-
-#ifdef CONFIG_HUGETLB_PAGE
-#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
-#define HPAGE_MASK		(~(HPAGE_SIZE - 1UL))
-#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
-#define ARCH_HAS_SETCLEAR_HUGE_PTE
-#define ARCH_HAS_HUGETLB_PREFAULT_HOOK
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#endif
-
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_32BIT) ? \
				 (_AC(0x0000000070000000,UL)) : \
				 (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
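Worked numbers for the HPAGE_* macros above, assuming the default 8K base page (PAGE_SHIFT 13) together with CONFIG_HUGETLB_PAGE_SIZE_4MB:

#define PAGE_SHIFT_EX	13
#define HPAGE_SHIFT_EX	22
#define HPAGE_SIZE_EX	(1UL << HPAGE_SHIFT_EX)			/* 0x400000, 4MB */
#define HPAGE_ORDER_EX	(HPAGE_SHIFT_EX - PAGE_SHIFT_EX)	/* 9 */
#define PTES_PER_HUGE	(1UL << HPAGE_ORDER_EX)			/* 512 */

So one huge page spans 512 base PTEs, which is exactly the (1 << HUGETLB_PAGE_ORDER) loop bound used by set_huge_pte_at() in the hugetlbpage.c hunk and the per-huge-PTE RSS share subtracted in fault.c.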
include/asm-sparc64/pgtable.h

@@ -105,6 +105,7 @@
 #define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved             */
 #define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
 #define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
+#define _PAGE_SZALL_4U	  _AC(0x6001000000000000,UL) /* All pgsz bits        */
 #define _PAGE_SN_4U	  _AC(0x0000800000000000,UL) /* (Cheetah) Snoop      */
 #define _PAGE_RES2_4U	  _AC(0x0000780000000000,UL) /* Reserved             */
 #define _PAGE_PADDR_4U	  _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]  */
@@ -150,6 +151,7 @@
 #define _PAGE_SZ512K_4V	  _AC(0x0000000000000002,UL) /* 512K Page            */
 #define _PAGE_SZ64K_4V	  _AC(0x0000000000000001,UL) /* 64K Page             */
 #define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL) /* 8K Page              */
+#define _PAGE_SZALL_4V	  _AC(0x0000000000000007,UL) /* All pgsz bits        */
 
 #if PAGE_SHIFT == 13
 #define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U