openanolis / cloud-kernel
Commit a45b0616

Merge branch 'slab/next' into for-linus

Author:  Pekka Enberg
Date:    Jan 09, 2011
Parents: 3c0eee3f, 8165984a
Showing 6 changed files with 89 additions and 75 deletions (+89 −75):
Documentation/vm/Makefile    +1  −1
include/linux/slab_def.h     +13 −20
include/linux/slub_def.h     +26 −29
mm/slab.c                    +23 −15
mm/slub.c                    +23 −7
tools/slub/slabinfo.c        +3  −3
Documentation/vm/Makefile

@@ -2,7 +2,7 @@
 obj- := dummy.o

 # List of programs to build
-hostprogs-y := slabinfo page-types hugepage-mmap hugepage-shm map_hugetlb
+hostprogs-y := page-types hugepage-mmap hugepage-shm map_hugetlb
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
include/linux/slab_def.h

@@ -138,11 +138,12 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);

 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern void *kmem_cache_alloc_trace(size_t size,
+				    struct kmem_cache *cachep, gfp_t flags);
 extern size_t slab_buffer_size(struct kmem_cache *cachep);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
 	return kmem_cache_alloc(cachep, flags);
 }

@@ -179,10 +180,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 #endif
 		cachep = malloc_sizes[i].cs_cachep;

-		ret = kmem_cache_alloc_notrace(cachep, flags);
-
-		trace_kmalloc(_THIS_IP_, ret,
-			      size, slab_buffer_size(cachep), flags);
+		ret = kmem_cache_alloc_trace(size, cachep, flags);

 		return ret;
 	}

@@ -194,12 +192,14 @@ extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+extern void *kmem_cache_alloc_node_trace(size_t size,
+					 struct kmem_cache *cachep,
 					   gfp_t flags,
 					   int nodeid);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+kmem_cache_alloc_node_trace(size_t size,
+			    struct kmem_cache *cachep,
 			      gfp_t flags,
 			      int nodeid)
 {

@@ -210,7 +210,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *cachep;
-	void *ret;

 	if (__builtin_constant_p(size)) {
 		int i = 0;

@@ -234,13 +233,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 #endif
 		cachep = malloc_sizes[i].cs_cachep;

-		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-		trace_kmalloc_node(_THIS_IP_, ret,
-				   size, slab_buffer_size(cachep),
-				   flags, node);
-
-		return ret;
+		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 	}
 	return __kmalloc_node(size, flags, node);
 }
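The pattern in the hunks above recurs through the whole series: the tracepoint (trace_kmalloc / trace_kmalloc_node) moves out of the __always_inline fast paths and into a single out-of-line *_trace helper, which now also receives the requested size, so every inlined kmalloc() call site shrinks to one function call. A minimal userspace sketch of the same idea (hypothetical names; not kernel code):

#include <stdio.h>
#include <stdlib.h>

/* Out-of-line helper: does the allocation and emits the trace record
 * once, here, instead of at every inlined call site. */
void *alloc_trace(size_t size)
{
	void *ret = malloc(size);

	fprintf(stderr, "trace: alloc size=%zu ptr=%p\n", size, ret);
	return ret;
}

/* The inlined fast path stays small: a single call, with no trace
 * boilerplate duplicated into the caller's object code. */
static inline void *my_kmalloc(size_t size)
{
	return alloc_trace(size);
}

int main(void)
{
	void *p = my_kmalloc(64);

	free(p);
	return 0;
}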
include/linux/slub_def.h

@@ -10,9 +10,8 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemleak.h>

-#include <trace/events/kmem.h>
+#include <linux/kmemleak.h>

 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */

@@ -216,31 +215,40 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);

+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+	kmemleak_alloc(ret, size, 1, flags);
+	return ret;
+}
+
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+extern void *
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	return kmem_cache_alloc(s, gfpflags);
 }
+
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	return kmalloc_order(size, flags, order);
+}
 #endif

 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
 	unsigned int order = get_order(size);
-	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
-
-	kmemleak_alloc(ret, size, 1, flags);
-	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
-
-	return ret;
+	return kmalloc_order_trace(size, flags, order);
 }

 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-	void *ret;
-
 	if (__builtin_constant_p(size)) {
 		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);

@@ -251,11 +259,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 			if (!s)
 				return ZERO_SIZE_PTR;

-			ret = kmem_cache_alloc_notrace(s, flags);
-
-			trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
-
-			return ret;
+			return kmem_cache_alloc_trace(s, flags, size);
 		}
 	}
 	return __kmalloc(size, flags);

@@ -266,14 +270,14 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 					   gfp_t gfpflags,
-					   int node);
+					   int node, size_t size);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
 			      gfp_t gfpflags,
-			      int node)
+			      int node, size_t size)
 {
 	return kmem_cache_alloc_node(s, gfpflags, node);
 }

@@ -281,8 +285,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *s,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	void *ret;
-
 	if (__builtin_constant_p(size) &&
 		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);

@@ -290,12 +292,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 		if (!s)
 			return ZERO_SIZE_PTR;

-		ret = kmem_cache_alloc_node_notrace(s, flags, node);
-
-		trace_kmalloc_node(_THIS_IP_, ret,
-				   size, s->size, flags, node);
-
-		return ret;
+		return kmem_cache_alloc_node_trace(s, flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
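For allocations above SLUB_MAX_SIZE, the header now separates the raw page-order allocation (kmalloc_order) from its traced wrapper (kmalloc_order_trace), so the inline kmalloc_large() reduces to an order computation plus one call; with CONFIG_TRACING off, the wrapper collapses back to kmalloc_order(). A rough userspace analogue of that layering (illustrative names only; not kernel code):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Base helper: raw allocation by page order, standing in for
 * kmalloc_order(), which uses __get_free_pages(). */
static void *order_alloc(size_t size, unsigned int order)
{
	return malloc(PAGE_SIZE << order);
}

/* Traced wrapper, mirroring kmalloc_order_trace(): the same
 * allocation plus one record of requested vs. allocated bytes. */
static void *order_alloc_trace(size_t size, unsigned int order)
{
	void *ret = order_alloc(size, order);

	fprintf(stderr, "trace: size=%zu bytes_alloc=%lu ptr=%p\n",
		size, PAGE_SIZE << order, ret);
	return ret;
}

int main(void)
{
	/* A 5000-byte request needs an order-1 (two-page) block. */
	void *p = order_alloc_trace(5000, 1);

	free(p);
	return 0;
}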
mm/slab.c

@@ -3653,11 +3653,18 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);

 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+void *
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret;
+
+	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	trace_kmalloc(_RET_IP_, ret,
+		      size, slab_buffer_size(cachep), flags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif

 /**

@@ -3705,31 +3712,32 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 EXPORT_SYMBOL(kmem_cache_alloc_node);

 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-				    gfp_t flags,
-				    int nodeid)
+void *kmem_cache_alloc_node_trace(size_t size,
+				  struct kmem_cache *cachep,
+				  gfp_t flags,
+				  int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
-				  __builtin_return_address(0));
+	void *ret;
+
+	ret = __cache_alloc_node(cachep, flags, nodeid,
+				 __builtin_return_address(0));
+
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, slab_buffer_size(cachep),
+			   flags, nodeid);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif

 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
-	void *ret;

 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-	trace_kmalloc_node((unsigned long) caller, ret,
-			   size, cachep->buffer_size,
-			   flags, node);
-	return ret;
+	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 }

 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
mm/slub.c

@@ -28,6 +28,8 @@
 #include <linux/math64.h>
 #include <linux/fault-inject.h>

+#include <trace/events/kmem.h>
+
 /*
  * Lock order:
  *   1. slab_lock(page)

@@ -1774,11 +1776,21 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 EXPORT_SYMBOL(kmem_cache_alloc);

 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
-	return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
+
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret = kmalloc_order(size, flags, order);
+	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+	return ret;
+}
+EXPORT_SYMBOL(kmalloc_order_trace);
 #endif

 #ifdef CONFIG_NUMA

@@ -1794,13 +1806,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 EXPORT_SYMBOL(kmem_cache_alloc_node);

 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 				    gfp_t gfpflags,
-				    int node)
+				    int node, size_t size)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, s->size, gfpflags, node);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 #endif
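With CONFIG_TRACING enabled, the events these helpers now emit can be observed through ftrace. A typical session (debugfs paths as in mainline kernels of this era; shown as an illustration, exact output varies):

	# echo 1 > /sys/kernel/debug/tracing/events/kmem/kmalloc/enable
	# echo 1 > /sys/kernel/debug/tracing/events/kmem/kmalloc_node/enable
	# cat /sys/kernel/debug/tracing/trace | head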
Documentation/vm/slabinfo.c → tools/slub/slabinfo.c (+3 −3, file moved)