openanolis / cloud-kernel
Commit e03ab9d4
Authored on Jun 17, 2009 by Pekka Enberg
Merge branches 'slab/documentation', 'slab/fixes', 'slob/cleanups' and 'slub/fixes' into for-linus
Parents: 65795efb a234bdc9 67461365 7303f240 95f85989
Showing 5 changed files with 84 additions and 24 deletions.
include/linux/page-flags.h   +0   -2
mm/slab.c                    +9   -0
mm/slob.c                    +3   -3
mm/slub.c                    +68  -19
mm/util.c                    +4   -0
include/linux/page-flags.h

@@ -118,7 +118,6 @@ enum pageflags {
 	PG_savepinned = PG_dirty,
 	/* SLOB */
-	PG_slob_page = PG_active,
 	PG_slob_free = PG_private,
 	/* SLUB */
@@ -201,7 +200,6 @@ PAGEFLAG(SavePinned, savepinned); /* Xen */
 PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
 PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
-__PAGEFLAG(SlobPage, slob_page)
 __PAGEFLAG(SlobFree, slob_free)
 __PAGEFLAG(SlubFrozen, slub_frozen)
mm/slab.c

@@ -2308,6 +2308,15 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		/* really off slab. No need for manual alignment */
 		slab_size =
 		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
+
+#ifdef CONFIG_PAGE_POISONING
+		/* If we're going to use the generic kernel_map_pages()
+		 * poisoning, then it's going to smash the contents of
+		 * the redzone and userword anyhow, so switch them off.
+		 */
+		if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
+			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
+#endif
 	}
 
 	cachep->colour_off = cache_line_size();
mm/slob.c

@@ -133,17 +133,17 @@ static LIST_HEAD(free_slob_large);
  */
 static inline int is_slob_page(struct slob_page *sp)
 {
-	return PageSlobPage((struct page *)sp);
+	return PageSlab((struct page *)sp);
 }
 
 static inline void set_slob_page(struct slob_page *sp)
 {
-	__SetPageSlobPage((struct page *)sp);
+	__SetPageSlab((struct page *)sp);
 }
 
 static inline void clear_slob_page(struct slob_page *sp)
 {
-	__ClearPageSlobPage((struct page *)sp);
+	__ClearPageSlab((struct page *)sp);
 }
 
 static inline struct slob_page *slob_page(const void *addr)
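For reference, the PageSlab()/__SetPageSlab()/__ClearPageSlab() helpers that SLOB switches to here are generated by the __PAGEFLAG(Slab, slab) entry that already exists in include/linux/page-flags.h, which is why __PAGEFLAG(SlobPage, slob_page) and the PG_slob_page alias of PG_active can simply be deleted above. Roughly, the macro expands to inlines along these lines (a simplified sketch, not the literal macro expansion):

static inline int PageSlab(struct page *page)
{
	return test_bit(PG_slab, &page->flags);
}

static inline void __SetPageSlab(struct page *page)
{
	__set_bit(PG_slab, &page->flags);	/* non-atomic set */
}

static inline void __ClearPageSlab(struct page *page)
{
	__clear_bit(PG_slab, &page->flags);
}

A side effect of the cleanup is that SLOB pages are now marked with the same PG_slab bit that SLAB and SLUB use, so generic PageSlab() checks recognize them as well.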
mm/slub.c

@@ -840,6 +840,11 @@ static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 	return atomic_long_read(&n->nr_slabs);
 }
 
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+{
+	return atomic_long_read(&n->nr_slabs);
+}
+
 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 {
 	struct kmem_cache_node *n = get_node(s, node);
@@ -1058,6 +1063,8 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 							{ return 0; }
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
+							{ return 0; }
 static inline void inc_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
@@ -1514,6 +1521,65 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
 	return 1;
 }
 
+static int count_free(struct page *page)
+{
+	return page->objects - page->inuse;
+}
+
+static unsigned long count_partial(struct kmem_cache_node *n,
+					int (*get_count)(struct page *))
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += get_count(page);
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
+static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
+{
+#ifdef CONFIG_SLUB_DEBUG
+	return atomic_long_read(&n->total_objects);
+#else
+	return 0;
+#endif
+}
+
+static noinline void slab_out_of_memory(struct kmem_cache *s,
+						gfp_t gfpflags, int nid)
+{
+	int node;
+
+	printk(KERN_WARNING
+		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
+		nid, gfpflags);
+	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
+		"default order: %d, min order: %d\n", s->name, s->objsize,
+		s->size, oo_order(s->oo), oo_order(s->min));
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+		unsigned long nr_slabs;
+		unsigned long nr_objs;
+		unsigned long nr_free;
+
+		if (!n)
+			continue;
+
+		nr_free  = count_partial(n, count_free);
+		nr_slabs = node_nr_slabs(n);
+		nr_objs  = node_nr_objs(n);
+
+		printk(KERN_WARNING
+			"  node %d: slabs: %ld, objs: %ld, free: %ld\n",
+			node, nr_slabs, nr_objs, nr_free);
+	}
+}
+
 /*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
@@ -1595,6 +1661,8 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		c->page = new;
 		goto load_freelist;
 	}
+	if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+		slab_out_of_memory(s, gfpflags, node);
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, c->page, object, addr))
@@ -3368,20 +3436,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 
 #ifdef CONFIG_SLUB_DEBUG
-static unsigned long count_partial(struct kmem_cache_node *n,
-					int (*get_count)(struct page *))
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
 static int count_inuse(struct page *page)
 {
 	return page->inuse;
@@ -3392,11 +3446,6 @@ static int count_total(struct page *page)
 	return page->objects;
 }
 
-static int count_free(struct page *page)
-{
-	return page->objects - page->inuse;
-}
-
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
 {
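With slab_out_of_memory() hooked into __slab_alloc(), a failed allocation now logs a per-cache and per-node summary unless the caller passed __GFP_NOWARN (and subject to printk_ratelimit()). The format strings are the ones in the hunks above; the cache name and counts below are invented purely for illustration:

SLUB: Unable to allocate memory on node -1 (gfp=0x20)
  cache: kmalloc-2048, object size: 2048, buffer size: 2048, default order: 3, min order: 0
  node 0: slabs: 123, objs: 1968, free: 0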
mm/util.c

@@ -168,6 +168,10 @@ EXPORT_SYMBOL(krealloc);
  *
  * The memory of the object @p points to is zeroed before freed.
  * If @p is %NULL, kzfree() does nothing.
+ *
+ * Note: this function zeroes the whole allocated buffer which can be a good
+ * deal bigger than the requested buffer size passed to kmalloc(). So be
+ * careful when using this function in performance sensitive code.
  */
 void kzfree(const void *p)
 {
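The added note matters because kzfree() clears the whole slab object, not the requested length: it asks the allocator for the object's true size via ksize() and zeroes that many bytes before freeing. A minimal, hypothetical caller (the function name and sizes are made up for illustration):

#include <linux/slab.h>

/*
 * Scrub key material before freeing it. A kmalloc(300, GFP_KERNEL)
 * buffer is typically backed by a 512-byte slab object, so kzfree()
 * zeroes all 512 bytes reported by ksize(), not just the 300 that
 * were requested -- fine for occasional frees, a real cost in hot paths.
 */
static void drop_session_key(u8 *key)
{
	kzfree(key);	/* zero the whole allocation, then free it */
}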