openanolis / cloud-kernel
Commit 415cb479
Authored Aug 04, 2010 by Pekka Enberg

Merge branches 'slab/fixes', 'slob/fixes', 'slub/cleanups' and 'slub/fixes' into for-linus

Parents: 9fe6206f 78b43536 d602daba 2bce6485 bc6488e9
Showing 5 changed files with 56 additions and 49 deletions (+56 −49)
include/linux/page-flags.h   +0 −2
include/linux/slab.h         +4 −2
mm/slab.c                    +1 −1
mm/slob.c                    +8 −1
mm/slub.c                    +43 −43
include/linux/page-flags.h

@@ -128,7 +128,6 @@ enum pageflags {
 	/* SLUB */
 	PG_slub_frozen = PG_active,
-	PG_slub_debug = PG_error,
 };

 #ifndef __GENERATING_BOUNDS_H

@@ -215,7 +214,6 @@ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
 __PAGEFLAG(SlobFree, slob_free)

 __PAGEFLAG(SlubFrozen, slub_frozen)
-__PAGEFLAG(SlubDebug, slub_debug)

 /*
  * Private page markings that may be used by the filesystem that owns the page
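The 'slub/cleanups' branch retires the per-page SlubDebug state: the mm/slub.c hunks below test the cache's own flags through a new kmem_cache_debug() helper instead of a page bit. Since __PAGEFLAG() generates a test plus non-atomic set/clear accessors, deleting this one invocation removes the whole PageSlubDebug()/__SetPageSlubDebug()/__ClearPageSlubDebug() family. A simplified sketch of what the macro expanded to (not the literal kernel definition):

/* Approximate expansion of __PAGEFLAG(SlubDebug, slub_debug): */
static inline int PageSlubDebug(struct page *page)
{
	return test_bit(PG_slub_debug, &page->flags);
}

static inline void __SetPageSlubDebug(struct page *page)
{
	__set_bit(PG_slub_debug, &page->flags);		/* non-atomic variant */
}

static inline void __ClearPageSlubDebug(struct page *page)
{
	__clear_bit(PG_slub_debug, &page->flags);	/* non-atomic variant */
}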
include/linux/slab.h

@@ -268,7 +268,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  * allocator where we care about the real place the memory allocation
  * request comes from.
  */
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc_track_caller(size, flags, _RET_IP_)

@@ -286,7 +287,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  * standard allocator where we care about the real place the memory
  * allocation request comes from.
  */
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
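Previously, a SLAB kernel built with CONFIG_TRACING but without CONFIG_DEBUG_SLAB fell into the #else branch, where kmalloc_track_caller() degrades to plain __kmalloc(), so trace events blamed the wrapper function rather than its caller. The added (CONFIG_SLAB && CONFIG_TRACING) arm keeps the real call site. The canonical consumer is kstrdup(); a sketch modeled on mm/util.c (simplified, not part of this commit):

#include <linux/slab.h>
#include <linux/string.h>

/* The allocation is attributed to whoever called kstrdup(), because
 * kmalloc_track_caller() passes _RET_IP_ -- the address kstrdup()
 * will return to -- down to the allocator. */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}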
mm/slab.c

@@ -861,7 +861,7 @@ static void __cpuinit start_cpu_timer(int cpu)
 	 */
 	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
-		INIT_DELAYED_WORK(reap_work, cache_reap);
+		INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work,
 					__round_jiffies_relative(HZ, cpu));
 	}
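cache_reap() is pure housekeeping, so waking an idle CPU just to run it wastes power. A deferrable delayed work item rides on a deferrable timer: if the CPU is idle when the timer expires, the work waits until the CPU wakes for some other reason. The same pattern in isolation, as a minimal sketch with hypothetical names (later kernels renamed the macro INIT_DEFERRABLE_WORK):

#include <linux/workqueue.h>
#include <linux/timer.h>

static struct delayed_work housekeeping;	/* hypothetical */

static void housekeeping_fn(struct work_struct *work)
{
	/* ... periodic, non-urgent cleanup ... */
	schedule_delayed_work(&housekeeping, round_jiffies_relative(HZ));
}

static void start_housekeeping(void)
{
	/* Deferrable: expiry on an idle CPU is postponed to its next wakeup. */
	INIT_DELAYED_WORK_DEFERRABLE(&housekeeping, housekeeping_fn);
	schedule_delayed_work(&housekeeping, round_jiffies_relative(HZ));
}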
mm/slob.c

@@ -394,6 +394,7 @@ static void slob_free(void *block, int size)
 	slob_t *prev, *next, *b = (slob_t *)block;
 	slobidx_t units;
 	unsigned long flags;
+	struct list_head *slob_list;

 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;

@@ -422,7 +423,13 @@ static void slob_free(void *block, int size)
 		set_slob(b, units,
 			(void *)((unsigned long)(b +
 					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
-		set_slob_page_free(sp, &free_slob_small);
+		if (size < SLOB_BREAK1)
+			slob_list = &free_slob_small;
+		else if (size < SLOB_BREAK2)
+			slob_list = &free_slob_medium;
+		else
+			slob_list = &free_slob_large;
+		set_slob_page_free(sp, slob_list);
 		goto out;
 	}
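SLOB keeps three partial-page lists, split at SLOB_BREAK1 (256 bytes) and SLOB_BREAK2 (1024 bytes). Before this fix, a full page that regained free space in slob_free() was always put back on free_slob_small, even if it served the medium or large size class, polluting the small list with pages of the wrong granularity. The selection logic the hunk adds, factored into a standalone helper for illustration only (SLOB itself keeps it inline):

/* Illustrative helper: pick the SLOB free list for a given request size.
 * In mm/slob.c: #define SLOB_BREAK1 256, #define SLOB_BREAK2 1024. */
static struct list_head *slob_list_for(int size)
{
	if (size < SLOB_BREAK1)
		return &free_slob_small;
	else if (size < SLOB_BREAK2)
		return &free_slob_medium;
	return &free_slob_large;
}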
mm/slub.c

@@ -107,11 +107,17 @@
  * the fast path and disables lockless freelists.
  */
-#ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG 1
-#else
-#define SLABDEBUG 0
-#endif
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+		SLAB_TRACE | SLAB_DEBUG_FREE)
+
+static inline int kmem_cache_debug(struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_DEBUG
+	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
+#else
+	return 0;
+#endif
+}

 /*
  * Issues still to be resolved:
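This helper is the heart of the cleanup: debug status now lives in the cache's flags word rather than in a per-page PG_slub_debug bit (removed in the page-flags.h hunk above). With CONFIG_SLUB_DEBUG=n it returns a constant 0, so every `if (kmem_cache_debug(s))` guard below compiles away, just as the old `SLABDEBUG && PageSlubDebug(page)` tests did. One practical consequence, sketched with hypothetical names (illustrative, not part of this commit):

/* "my_obj" and "my_cache" are made-up names. A cache created with debug
 * flags now takes the debug paths because kmem_cache_debug() tests
 * s->flags directly, independent of any per-page state. */
struct my_obj { int x; };
static struct kmem_cache *my_cache;

static int __init my_module_init(void)
{
	my_cache = kmem_cache_create("my_cache", sizeof(struct my_obj), 0,
			SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER, NULL);
	return my_cache ? 0 : -ENOMEM;
}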
@@ -162,8 +168,8 @@
 #define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */

 /* Internal SLUB flags */
-#define __OBJECT_POISON		0x80000000 /* Poison object */
-#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
+#define __OBJECT_POISON		0x80000000UL /* Poison object */
+#define __SYSFS_ADD_DEFERRED	0x40000000UL /* Not yet visible via sysfs */

 static int kmem_size = sizeof(struct kmem_cache);
@@ -1073,7 +1079,7 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,

 	flags |= __GFP_NOTRACK;

-	if (node == -1)
+	if (node == NUMA_NO_NODE)
 		return alloc_pages(flags, order);
 	else
 		return alloc_pages_exact_node(node, flags, order);
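NUMA_NO_NODE simply gives the magic number a name; from include/linux/numa.h:

#define	NUMA_NO_NODE	(-1)

So the `node == -1` conversions scattered through this file change readability, not behavior (note that one literal `node != -1` survives in the get_partial() hunk below).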
@@ -1157,9 +1163,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
-	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
-			SLAB_STORE_USER | SLAB_TRACE))
-		__SetPageSlubDebug(page);

 	start = page_address(page);
@@ -1186,14 +1189,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	int order = compound_order(page);
 	int pages = 1 << order;

-	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
+	if (kmem_cache_debug(s)) {
 		void *p;

 		slab_pad_check(s, page);
 		for_each_object(p, s, page_address(page),
						page->objects)
 			check_object(s, page, p, 0);
-		__ClearPageSlubDebug(page);
 	}

 	kmemcheck_free_shadow(page, compound_order(page));
@@ -1387,10 +1389,10 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
-	int searchnode = (node == -1) ? numa_node_id() : node;
+	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;

 	page = get_partial_node(get_node(s, searchnode));
-	if (page || (flags & __GFP_THISNODE))
+	if (page || node != -1)
 		return page;

 	return get_any_partial(s, flags);
@@ -1415,8 +1417,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 	} else {
 		stat(s, DEACTIVATE_FULL);
-		if (SLABDEBUG && PageSlubDebug(page) &&
-						(s->flags & SLAB_STORE_USER))
+		if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
 	}
 	slab_unlock(page);
@@ -1515,7 +1516,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct kmem_cache_cpu *c, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != -1 && c->node != node)
+	if (node != NUMA_NO_NODE && c->node != node)
 		return 0;
 #endif
 	return 1;
@@ -1624,7 +1625,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	object = c->page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
-	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
+	if (kmem_cache_debug(s))
 		goto debug;

 	c->freelist = get_freepointer(s, object);
@@ -1727,7 +1728,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,

 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);

 	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
@@ -1738,7 +1739,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_notrace);
 #endif
@@ -1783,7 +1784,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	stat(s, FREE_SLOWPATH);
 	slab_lock(page);

-	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
+	if (kmem_cache_debug(s))
 		goto debug;

 checks_ok:
@@ -2490,7 +2491,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	s->refcount--;
 	if (!s->refcount) {
 		list_del(&s->list);
-		up_write(&slub_lock);
 		if (kmem_cache_close(s)) {
 			printk(KERN_ERR "SLUB %s: %s called for cache that "
 				"still has objects.\n", s->name, __func__);

@@ -2499,8 +2499,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		if (s->flags & SLAB_DESTROY_BY_RCU)
 			rcu_barrier();
 		sysfs_slab_remove(s);
-	} else
-		up_write(&slub_lock);
+	}
+	up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
@@ -2728,7 +2728,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;

-	ret = slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);

 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
@@ -3118,9 +3118,12 @@ void __init kmem_cache_init(void)
 	slab_state = UP;

 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
-		kmalloc_caches[i].name =
-			kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
+
+		BUG_ON(!s);
+		kmalloc_caches[i].name = s;
+	}

 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
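kasprintf() allocates the name string, and a GFP_NOWAIT allocation can fail this early in boot; previously a NULL would be stored silently as a cache name and would likely only surface later, far from the cause. The BUG_ON(!s) turns that into an immediate, attributable failure.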
@@ -3223,14 +3226,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		 */
 		s->objsize = max(s->objsize, (int)size);
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
-		up_write(&slub_lock);

 		if (sysfs_slab_alias(s, name)) {
-			down_write(&slub_lock);
 			s->refcount--;
-			up_write(&slub_lock);
 			goto err;
 		}
+		up_write(&slub_lock);
 		return s;
 	}

@@ -3239,14 +3240,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		if (kmem_cache_open(s, GFP_KERNEL, name,
 				size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
-			up_write(&slub_lock);
 			if (sysfs_slab_add(s)) {
-				down_write(&slub_lock);
 				list_del(&s->list);
-				up_write(&slub_lock);
 				kfree(s);
 				goto err;
 			}
+			up_write(&slub_lock);
 			return s;
 		}
 		kfree(s);
@@ -3312,7 +3311,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;

-	ret = slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);

 	/* Honor the call site pointer we recieved. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -3395,16 +3394,6 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page,
 	} else
 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
 			s->name, page);
-
-	if (s->flags & DEBUG_DEFAULT_FLAGS) {
-		if (!PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug not set "
-				"on slab 0x%p\n", s->name, page);
-	} else {
-		if (PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug set on "
-				"slab 0x%p\n", s->name, page);
-	}
 }

 static int validate_slab_node(struct kmem_cache *s,
@@ -4504,6 +4493,13 @@ static int sysfs_slab_add(struct kmem_cache *s)

 static void sysfs_slab_remove(struct kmem_cache *s)
 {
+	if (slab_state < SYSFS)
+		/*
+		 * Sysfs has not been setup yet so no need to remove the
+		 * cache from sysfs.
+		 */
+		return;
+
 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
 	kobject_del(&s->kobj);
 	kobject_put(&s->kobj);
@@ -4549,8 +4545,11 @@ static int __init slab_sysfs_init(void)
 	struct kmem_cache *s;
 	int err;

+	down_write(&slub_lock);
+
 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
 	if (!slab_kset) {
+		up_write(&slub_lock);
 		printk(KERN_ERR "Cannot register slab subsystem.\n");
 		return -ENOSYS;
 	}

@@ -4575,6 +4574,7 @@ static int __init slab_sysfs_init(void)
 		kfree(al);
 	}

+	up_write(&slub_lock);
 	resiliency_test();
 	return 0;
 }
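Taken together, the locking hunks change kmem_cache_destroy(), kmem_cache_create() and slab_sysfs_init() from dropping and re-taking slub_lock around sysfs calls to holding it across them, and sysfs_slab_remove() now returns early while slab_state < SYSFS. The net effect, as far as the diff shows, is that a cache created during early boot can also be destroyed during early boot: slab_sysfs_init() never walks slab_caches while a cache's sysfs registration or removal is half done.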