openanolis / cloud-kernel
9b4006dc
编写于
3月 18, 2006
作者:
D
David S. Miller
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
[SPARC64]: Use SLAB caches for TSB tables.
Signed-off-by:
N
David S. Miller
<
davem@davemloft.net
>
上级
b52439c2
变更
3
隐藏空白更改
内联
并排
Showing
3 changed file
with
69 addition
and
25 deletion
+69
-25
Changed files:

    arch/sparc64/Kconfig      +3   -0
    arch/sparc64/mm/init.c    +4   -1
    arch/sparc64/mm/tsb.c     +62  -24
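In outline, the commit switches TSB (Translation Storage Buffer) allocation from raw page allocations (alloc_pages/free_pages) to eight SLAB caches, one per power-of-two TSB size from 8KB to 1MB; the diff recovers a TSB's cache index from the low three bits of mm->context.tsb_reg_val. CONFIG_LARGE_ALLOCS is presumably enabled because the largest caches (512KB, 1MB) exceed the slab allocator's default maximum object size. Below is a minimal, standalone C sketch of the size/index mapping the caches rely on; tsb_size_for_index and tsb_index_for_size are illustrative helpers, not kernel functions.

    #include <stdio.h>

    /* Illustrative stand-ins (not kernel code): cache index i serves
     * TSBs of 8192 << i bytes, matching tsb_cache_names[] in the diff.
     */
    static unsigned long tsb_size_for_index(unsigned long i)
    {
            return 8192UL << i;     /* 8KB, 16KB, ..., 1MB */
    }

    /* Smallest cache whose object size fits the requested TSB size. */
    static unsigned long tsb_index_for_size(unsigned long size)
    {
            unsigned long i;

            for (i = 0; i < 8; i++)
                    if (tsb_size_for_index(i) >= size)
                            return i;
            return 7;               /* clamp to the 1MB cache */
    }

    int main(void)
    {
            unsigned long i;

            for (i = 0; i < 8; i++)
                    printf("tsb_caches[%lu] holds %7lu-byte TSBs\n",
                           i, tsb_size_for_index(i));
            printf("a 100000-byte TSB comes from cache %lu\n",
                   tsb_index_for_size(100000));
            return 0;
    }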
arch/sparc64/Kconfig

@@ -192,6 +192,9 @@ config ARCH_SPARSEMEM_ENABLE
 config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
 
+config LARGE_ALLOCS
+	def_bool y
+
 source "mm/Kconfig"
 
 config GENERIC_ISA_DMA
arch/sparc64/mm/init.c

@@ -165,6 +165,8 @@ static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
 	clear_page(addr);
 }
 
+extern void tsb_cache_init(void);
+
 void pgtable_cache_init(void)
 {
 	pgtable_cache = kmem_cache_create("pgtable_cache",
@@ -174,9 +176,10 @@ void pgtable_cache_init(void)
 					  zero_ctor,
 					  NULL);
 	if (!pgtable_cache) {
-		prom_printf("pgtable_cache_init(): Could not create!\n");
+		prom_printf("Could not create pgtable_cache\n");
 		prom_halt();
 	}
+	tsb_cache_init();
 }
 
 #ifdef CONFIG_DEBUG_DCFLUSH
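The init.c hunk wires the new cache setup into early boot: pgtable_cache_init() now calls tsb_cache_init(), so all eight TSB caches exist before any task can fault and reach tsb_grow(). A toy userspace model of that ordering contract; the names mirror the kernel's, but every definition here is a stand-in.

    #include <stdio.h>
    #include <stdlib.h>

    static int tsb_caches_ready;

    static void tsb_cache_init(void)
    {
            tsb_caches_ready = 1;   /* stands in for the 8 kmem_cache_create() calls */
    }

    static void pgtable_cache_init(void)
    {
            /* ... create pgtable_cache ... */
            tsb_cache_init();       /* the call the commit adds */
    }

    static void tsb_grow(void)
    {
            if (!tsb_caches_ready) {
                    fprintf(stderr, "tsb_grow() before tsb_cache_init()\n");
                    exit(1);
            }
            printf("allocating TSB from a SLAB cache\n");
    }

    int main(void)
    {
            pgtable_cache_init();   /* boot-time init */
            tsb_grow();             /* later, on a fault path */
            return 0;
    }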
arch/sparc64/mm/tsb.c

@@ -11,6 +11,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/tsb.h>
+#include <asm/oplib.h>
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
@@ -207,6 +208,39 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 	}
 }
 
+static kmem_cache_t *tsb_caches[8] __read_mostly;
+
+static const char *tsb_cache_names[8] = {
+	"tsb_8KB",
+	"tsb_16KB",
+	"tsb_32KB",
+	"tsb_64KB",
+	"tsb_128KB",
+	"tsb_256KB",
+	"tsb_512KB",
+	"tsb_1MB",
+};
+
+void __init tsb_cache_init(void)
+{
+	unsigned long i;
+
+	for (i = 0; i < 8; i++) {
+		unsigned long size = 8192 << i;
+		const char *name = tsb_cache_names[i];
+
+		tsb_caches[i] = kmem_cache_create(name,
+						  size, size,
+						  SLAB_HWCACHE_ALIGN |
+						  SLAB_MUST_HWCACHE_ALIGN,
+						  NULL, NULL);
+		if (!tsb_caches[i]) {
+			prom_printf("Could not create %s cache\n", name);
+			prom_halt();
+		}
+	}
+}
+
 /* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
  * do_sparc64_fault() invokes this routine to try and grow the TSB.
  *
@@ -226,45 +260,48 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 void tsb_grow(struct mm_struct *mm, unsigned long rss)
 {
 	unsigned long max_tsb_size = 1 * 1024 * 1024;
-	unsigned long size, old_size, flags;
-	struct page *page;
+	unsigned long new_size, old_size, flags;
 	struct tsb *old_tsb, *new_tsb;
-	unsigned long order, new_rss_limit;
+	unsigned long new_cache_index, old_cache_index;
+	unsigned long new_rss_limit;
 	gfp_t gfp_flags;
 
 	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
 		max_tsb_size = (PAGE_SIZE << MAX_ORDER);
 
-	for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
-		unsigned long n_entries = size / sizeof(struct tsb);
+	new_cache_index = 0;
+	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
+		unsigned long n_entries = new_size / sizeof(struct tsb);
 
 		n_entries = (n_entries * 3) / 4;
 		if (n_entries > rss)
 			break;
+
+		new_cache_index++;
 	}
 
-	if (size == max_tsb_size)
+	if (new_size == max_tsb_size)
 		new_rss_limit = ~0UL;
 	else
-		new_rss_limit = ((size / sizeof(struct tsb)) * 3) / 4;
+		new_rss_limit = ((new_size / sizeof(struct tsb)) * 3) / 4;
 
-retry_page_alloc:
-	order = get_order(size);
+retry_tsb_alloc:
 	gfp_flags = GFP_KERNEL;
-	if (order > 1)
+	if (new_size > (PAGE_SIZE * 2))
 		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;
 
-	page = alloc_pages(gfp_flags, order);
-	if (unlikely(!page)) {
+	new_tsb = kmem_cache_alloc(tsb_caches[new_cache_index], gfp_flags);
+	if (unlikely(!new_tsb)) {
 		/* Not being able to fork due to a high-order TSB
 		 * allocation failure is very bad behavior.  Just back
 		 * down to a 0-order allocation and force no TSB
 		 * growing for this address space.
 		 */
-		if (mm->context.tsb == NULL && order > 0) {
-			size = PAGE_SIZE;
+		if (mm->context.tsb == NULL && new_cache_index > 0) {
+			new_cache_index = 0;
+			new_size = 8192;
 			new_rss_limit = ~0UL;
-			goto retry_page_alloc;
+			goto retry_tsb_alloc;
 		}
 
 		/* If we failed on a TSB grow, we are under serious
@@ -276,8 +313,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss)
 	}
 
 	/* Mark all tags as invalid.  */
-	new_tsb = page_address(page);
-	memset(new_tsb, 0x40, size);
+	memset(new_tsb, 0x40, new_size);
 
 	/* Ok, we are about to commit the changes.  If we are
 	 * growing an existing TSB the locking is very tricky,
@@ -304,8 +340,10 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss)
 	spin_lock_irqsave(&mm->context.lock, flags);
 
 	old_tsb = mm->context.tsb;
+	old_cache_index = (mm->context.tsb_reg_val & 0x7UL);
 	old_size = mm->context.tsb_nentries * sizeof(struct tsb);
 
 	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
@@ -313,7 +351,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss)
 	if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
 		spin_unlock_irqrestore(&mm->context.lock, flags);
 
-		free_pages((unsigned long) new_tsb, get_order(size));
+		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
 		return;
 	}
@@ -331,11 +369,11 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss)
 			old_tsb_base = __pa(old_tsb_base);
 			new_tsb_base = __pa(new_tsb_base);
 		}
-		copy_tsb(old_tsb_base, old_size, new_tsb_base, size);
+		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
 	}
 
 	mm->context.tsb = new_tsb;
-	setup_tsb_params(mm, size);
+	setup_tsb_params(mm, new_size);
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
@@ -350,7 +388,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss)
 		smp_tsb_sync(mm);
 
 		/* Now it is safe to free the old tsb.  */
-		free_pages((unsigned long) old_tsb, get_order(old_size));
+		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
 	}
 }
@@ -379,10 +417,10 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 void destroy_context(struct mm_struct *mm)
 {
-	unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);
-	unsigned long flags;
+	unsigned long flags, cache_index;
 
-	free_pages((unsigned long) mm->context.tsb, get_order(size));
+	cache_index = (mm->context.tsb_reg_val & 0x7UL);
+	kmem_cache_free(tsb_caches[cache_index], mm->context.tsb);
 
 	/* We can remove these later, but for now it's useful
	 * to catch any bogus post-destroy_context() references
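The sizing policy in tsb_grow() picks the smallest TSB whose entry count, at three-quarters occupancy, exceeds the address space's RSS, tracking the matching cache index as it goes; on allocation failure above the smallest size it backs off to the 8KB cache and sets the RSS limit to ~0UL so the TSB stops growing. A standalone sketch of that policy, assuming the kernel's 16-byte struct tsb entry (two unsigned longs on LP64); pick_tsb_size is an illustrative helper, not a kernel function, and the PAGE_SIZE << MAX_ORDER clamp is omitted.

    #include <stdio.h>

    struct tsb { unsigned long tag, pte; };     /* 16 bytes on LP64, as on sparc64 */

    static void pick_tsb_size(unsigned long rss,
                              unsigned long *new_size,
                              unsigned long *new_cache_index)
    {
            unsigned long max_tsb_size = 1 * 1024 * 1024;
            unsigned long size, index = 0;

            /* Grow until 3/4 of the entries would exceed the RSS. */
            for (size = 8192; size < max_tsb_size; size <<= 1UL) {
                    unsigned long n_entries = size / sizeof(struct tsb);

                    n_entries = (n_entries * 3) / 4;
                    if (n_entries > rss)
                            break;
                    index++;
            }
            *new_size = size;
            *new_cache_index = index;
    }

    int main(void)
    {
            unsigned long rss_samples[] = { 100, 1000, 10000, 100000 };
            unsigned long i;

            for (i = 0; i < 4; i++) {
                    unsigned long size, idx;

                    pick_tsb_size(rss_samples[i], &size, &idx);
                    printf("rss=%6lu -> TSB size=%7lu (tsb_caches[%lu])\n",
                           rss_samples[i], size, idx);
            }
            return 0;
    }

Note how the freeing side pairs up: the new TSB is freed back through tsb_caches[new_cache_index] on the racing-thread path, while the old TSB's cache is recovered from the low three bits of tsb_reg_val (old_cache_index), exactly as destroy_context() does.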