openeuler / raspberrypi-kernel
Commit e2b093f3
Authored Mar 04, 2010 by Pekka Enberg

Merge branches 'slab/cleanups', 'slab/failslab', 'slab/fixes' and 'slub/percpu' into slab-for-linus
Parents: eaa5eec7 f3186a9c 4c13dd3b 44b57f1c 91efd773
Showing 7 changed files with 146 additions and 260 deletions (+146 -260)
Documentation/vm/slub.txt       +1   -0
include/linux/fault-inject.h    +3   -2
include/linux/slab.h            +5   -0
include/linux/slub_def.h        +12  -15
mm/failslab.c                   +15  -3
mm/slab.c                       +6   -7
mm/slub.c                       +104 -233
Documentation/vm/slub.txt

@@ -41,6 +41,7 @@ Possible debug options are
 P       Poisoning (object and padding)
 U       User tracking (free and alloc)
 T       Trace (please only use on single slabs)
+A       Toggle failslab filter mark for the cache
 O       Switch debugging off for caches that would have
         caused higher minimum slab orders
 -       Switch all debugging off (useful if the kernel is
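The new 'A' debug option marks a cache for fault injection; the same mark can also be flipped at runtime through the failslab attribute that this merge adds to each cache's sysfs directory (see failslab_store() in the mm/slub.c hunks below). A minimal userspace sketch, not part of the patch, assuming CONFIG_FAILSLAB and the usual /sys/kernel/slab layout; the cache name is only an example:

    /* Toggle the failslab mark on one SLUB cache via sysfs (sketch). */
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
        const char *cache = (argc > 1) ? argv[1] : "kmalloc-64";  /* example cache name */
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/kernel/slab/%s/failslab", cache);
        f = fopen(path, "w");
        if (!f) {
            perror(path);
            return EXIT_FAILURE;
        }
        /* "1" sets SLAB_FAILSLAB on the cache, "0" clears it */
        fputs("1\n", f);
        fclose(f);
        return EXIT_SUCCESS;
    }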
include/linux/fault-inject.h

@@ -82,9 +82,10 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr)
 #endif /* CONFIG_FAULT_INJECTION */

 #ifdef CONFIG_FAILSLAB
-extern bool should_failslab(size_t size, gfp_t gfpflags);
+extern bool should_failslab(size_t size, gfp_t gfpflags, unsigned long flags);
 #else
-static inline bool should_failslab(size_t size, gfp_t gfpflags)
+static inline bool should_failslab(size_t size, gfp_t gfpflags,
+                                unsigned long flags)
 {
         return false;
 }
include/linux/slab.h

@@ -70,6 +70,11 @@
 #else
 # define SLAB_NOTRACK           0x00000000UL
 #endif
+#ifdef CONFIG_FAILSLAB
+# define SLAB_FAILSLAB          0x02000000UL    /* Fault injection mark */
+#else
+# define SLAB_FAILSLAB          0x00000000UL
+#endif

 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT    0x00020000UL    /* Objects are reclaimable */
include/linux/slub_def.h

@@ -38,8 +38,6 @@ struct kmem_cache_cpu {
         void **freelist;        /* Pointer to first free per cpu object */
         struct page *page;      /* The slab from which we are allocating */
         int node;               /* The node of the page (or -1 for debug) */
-        unsigned int offset;    /* Freepointer offset (in word units) */
-        unsigned int objsize;   /* Size of an object (from kmem_cache) */
 #ifdef CONFIG_SLUB_STATS
         unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
@@ -69,6 +67,7 @@ struct kmem_cache_order_objects {
  * Slab cache management.
  */
 struct kmem_cache {
+        struct kmem_cache_cpu *cpu_slab;
         /* Used for retriving partial slabs etc */
         unsigned long flags;
         int size;               /* The size of an object including meta data */
@@ -104,11 +103,6 @@ struct kmem_cache {
         int remote_node_defrag_ratio;
         struct kmem_cache_node *node[MAX_NUMNODES];
 #endif
-#ifdef CONFIG_SMP
-        struct kmem_cache_cpu *cpu_slab[NR_CPUS];
-#else
-        struct kmem_cache_cpu cpu_slab;
-#endif
 };

 /*
@@ -135,11 +129,21 @@
 #define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)

+#ifdef CONFIG_ZONE_DMA
+#define SLUB_DMA __GFP_DMA
+/* Reserve extra caches for potential DMA use */
+#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
+#else
+/* Disable DMA functionality */
+#define SLUB_DMA (__force gfp_t)0
+#define KMALLOC_CACHES SLUB_PAGE_SHIFT
+#endif
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];

 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -207,13 +211,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
         return &kmalloc_caches[index];
 }

-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA (__force gfp_t)0
-#endif
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
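As a worked number for the new KMALLOC_CACHES sizing: with the common 4 KiB pages (PAGE_SHIFT = 12, an assumption, not part of the patch) SLUB_PAGE_SHIFT is 14, so the array gets 22 slots when CONFIG_ZONE_DMA reserves room for DMA clones and 14 otherwise. A small standalone check of that arithmetic:

    /* Worked example of the KMALLOC_CACHES sizing from the hunk above. */
    #include <stdio.h>

    #define PAGE_SHIFT      12                      /* assumption: 4 KiB pages */
    #define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)        /* 14 */

    int main(void)
    {
        int with_dma    = 2 * SLUB_PAGE_SHIFT - 6;  /* CONFIG_ZONE_DMA: 22 slots */
        int without_dma = SLUB_PAGE_SHIFT;          /* otherwise: 14 slots */

        printf("KMALLOC_CACHES = %d (DMA) / %d (no DMA)\n", with_dma, without_dma);
        return 0;
    }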
mm/failslab.c

@@ -1,18 +1,22 @@
 #include <linux/fault-inject.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>

 static struct {
         struct fault_attr attr;
         u32 ignore_gfp_wait;
+        int cache_filter;
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
         struct dentry *ignore_gfp_wait_file;
+        struct dentry *cache_filter_file;
 #endif
 } failslab = {
         .attr = FAULT_ATTR_INITIALIZER,
         .ignore_gfp_wait = 1,
+        .cache_filter = 0,
 };

-bool should_failslab(size_t size, gfp_t gfpflags)
+bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
 {
         if (gfpflags & __GFP_NOFAIL)
                 return false;
@@ -20,6 +24,9 @@ bool should_failslab(size_t size, gfp_t gfpflags)
         if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
                 return false;

+        if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB))
+                return false;
+
         return should_fail(&failslab.attr, size);
 }
@@ -30,7 +37,6 @@ static int __init setup_failslab(char *str)
 __setup("failslab=", setup_failslab);

 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
 static int __init failslab_debugfs_init(void)
 {
         mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
@@ -46,8 +52,14 @@ static int __init failslab_debugfs_init(void)
                 debugfs_create_bool("ignore-gfp-wait", mode, dir,
                                       &failslab.ignore_gfp_wait);

-        if (!failslab.ignore_gfp_wait_file) {
+        failslab.cache_filter_file =
+                debugfs_create_bool("cache-filter", mode, dir,
+                                      &failslab.cache_filter);
+
+        if (!failslab.ignore_gfp_wait_file ||
+            !failslab.cache_filter_file) {
                 err = -ENOMEM;
+                debugfs_remove(failslab.cache_filter_file);
                 debugfs_remove(failslab.ignore_gfp_wait_file);
                 cleanup_fault_attr_dentries(&failslab.attr);
         }
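Pulling the three early-return checks together: the reworked should_failslab() only lets a fault through when the allocation is failable, is not a __GFP_WAIT allocation being ignored, and, once cache-filter is enabled, only for caches carrying SLAB_FAILSLAB. A standalone mock of that decision order, with the flag values and should_fail() stubbed out purely for illustration:

    /* Standalone mock of the filtering order in the new should_failslab(). */
    #include <stdbool.h>
    #include <stdio.h>

    #define __GFP_NOFAIL    0x800u          /* placeholder bit values, not the kernel's */
    #define __GFP_WAIT      0x10u
    #define SLAB_FAILSLAB   0x02000000ul

    static struct {
        bool ignore_gfp_wait;
        bool cache_filter;
    } failslab = { .ignore_gfp_wait = true, .cache_filter = false };

    static bool should_fail(size_t size)
    {
        return true;    /* stand-in for the fault_attr probability logic */
    }

    static bool should_failslab(size_t size, unsigned gfpflags, unsigned long cache_flags)
    {
        if (gfpflags & __GFP_NOFAIL)
            return false;
        if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
            return false;
        if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB))
            return false;
        return should_fail(size);
    }

    int main(void)
    {
        failslab.cache_filter = true;
        printf("unmarked cache: %d\n", should_failslab(64, 0, 0));             /* 0 */
        printf("marked cache:   %d\n", should_failslab(64, 0, SLAB_FAILSLAB)); /* 1 */
        return 0;
    }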
mm/slab.c

@@ -935,7 +935,6 @@ static int transfer_objects(struct array_cache *to,
         from->avail -= nr;
         to->avail += nr;
-        to->touched = 1;

         return nr;
 }
@@ -983,13 +982,11 @@ static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
         if (limit > 1)
                 limit = 12;
-        ac_ptr = kmalloc_node(memsize, gfp, node);
+        ac_ptr = kzalloc_node(memsize, gfp, node);
         if (ac_ptr) {
                 for_each_node(i) {
-                        if (i == node || !node_online(i)) {
-                                ac_ptr[i] = NULL;
+                        if (i == node || !node_online(i))
                                 continue;
-                        }
                         ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
                         if (!ac_ptr[i]) {
                                 for (i--; i >= 0; i--)
@@ -2963,8 +2960,10 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
         spin_lock(&l3->list_lock);

         /* See if we can refill from the shared array */
-        if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
+        if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
+                l3->shared->touched = 1;
                 goto alloc_done;
+        }

         while (batchcount > 0) {
                 struct list_head *entry;
@@ -3101,7 +3100,7 @@ static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
         if (cachep == &cache_cache)
                 return false;

-        return should_failslab(obj_size(cachep), flags);
+        return should_failslab(obj_size(cachep), flags, cachep->flags);
 }

 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
mm/slub.c

@@ -151,7 +151,8 @@
  * Set of flags that will prevent slab merging
  */
 #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-                SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE)
+                SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
+                SLAB_FAILSLAB)

 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
                 SLAB_CACHE_DMA | SLAB_NOTRACK)
@@ -217,10 +218,10 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
 #endif

-static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
+static inline void stat(struct kmem_cache *s, enum stat_item si)
 {
 #ifdef CONFIG_SLUB_STATS
-        c->stat[si]++;
+        __this_cpu_inc(s->cpu_slab->stat[si]);
 #endif
 }
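The stat() change above is the heart of the 'slub/percpu' branch: instead of looking up a per-CPU struct through an array indexed by the CPU id, the counter lives in a percpu allocation hanging off struct kmem_cache and is bumped with __this_cpu_inc(). A rough userspace analogue of that shape, with sched_getcpu() and a plain array standing in for the kernel's percpu machinery (illustration only, not kernel API):

    /* Userspace analogue of the per-CPU statistics change. */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    #define NR_CPUS_MAX 64          /* sketch assumption; real kernel uses percpu areas */
    enum stat_item { ALLOC_FASTPATH, FREE_FASTPATH, NR_SLUB_STAT_ITEMS };

    struct kmem_cache_cpu { unsigned stat[NR_SLUB_STAT_ITEMS]; };
    struct kmem_cache     { struct kmem_cache_cpu *cpu_slab; };  /* one base pointer */

    static struct kmem_cache_cpu slots[NR_CPUS_MAX];

    /* New style: the cache carries one percpu base pointer and the
     * increment resolves "this CPU's" slot at use time. */
    static void stat(struct kmem_cache *s, enum stat_item si)
    {
        s->cpu_slab[sched_getcpu() % NR_CPUS_MAX].stat[si]++;   /* clamp for the sketch */
    }

    int main(void)
    {
        struct kmem_cache cache = { .cpu_slab = slots };
        int cpu = sched_getcpu() % NR_CPUS_MAX;

        stat(&cache, ALLOC_FASTPATH);
        printf("cpu %d fastpath allocs: %u\n", cpu,
               cache.cpu_slab[cpu].stat[ALLOC_FASTPATH]);
        return 0;
    }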
@@ -242,15 +243,6 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 #endif
 }

-static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
-{
-#ifdef CONFIG_SMP
-        return s->cpu_slab[cpu];
-#else
-        return &s->cpu_slab;
-#endif
-}
-
 /* Verify that a pointer has an address that is valid within a slab page */
 static inline int check_valid_pointer(struct kmem_cache *s,
                                 struct page *page, const void *object)
@@ -269,13 +261,6 @@ static inline int check_valid_pointer(struct kmem_cache *s,
         return 1;
 }

-/*
- * Slow version of get and set free pointer.
- *
- * This version requires touching the cache lines of kmem_cache which
- * we avoid to do in the fast alloc free paths. There we obtain the offset
- * from the page struct.
- */
 static inline void *get_freepointer(struct kmem_cache *s, void *object)
 {
         return *(void **)(object + s->offset);
@@ -1020,6 +1005,9 @@ static int __init setup_slub_debug(char *str)
                 case 't':
                         slub_debug |= SLAB_TRACE;
                         break;
+                case 'a':
+                        slub_debug |= SLAB_FAILSLAB;
+                        break;
                 default:
                         printk(KERN_ERR "slub_debug option '%c' "
                                 "unknown. skipped\n", *str);
@@ -1124,7 +1112,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
                 if (!page)
                         return NULL;

-                stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
+                stat(s, ORDER_FALLBACK);
         }

         if (kmemcheck_enabled
@@ -1422,23 +1410,22 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
         struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-        struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());

         __ClearPageSlubFrozen(page);
         if (page->inuse) {

                 if (page->freelist) {
                         add_partial(n, page, tail);
-                        stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
+                        stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
                 } else {
-                        stat(c, DEACTIVATE_FULL);
+                        stat(s, DEACTIVATE_FULL);
                         if (SLABDEBUG && PageSlubDebug(page) &&
                                                 (s->flags & SLAB_STORE_USER))
                                 add_full(n, page);
                 }
                 slab_unlock(page);
         } else {
-                stat(c, DEACTIVATE_EMPTY);
+                stat(s, DEACTIVATE_EMPTY);
                 if (n->nr_partial < s->min_partial) {
                         /*
                          * Adding an empty slab to the partial slabs in order
@@ -1454,7 +1441,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
                         slab_unlock(page);
                 } else {
                         slab_unlock(page);
-                        stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
+                        stat(s, FREE_SLAB);
                         discard_slab(s, page);
                 }
         }
@@ -1469,7 +1456,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
         int tail = 1;

         if (page->freelist)
-                stat(c, DEACTIVATE_REMOTE_FREES);
+                stat(s, DEACTIVATE_REMOTE_FREES);
         /*
          * Merge cpu freelist into slab freelist. Typically we get here
          * because both freelists are empty. So this is unlikely
@@ -1482,10 +1469,10 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)

                 /* Retrieve object from cpu_freelist */
                 object = c->freelist;
-                c->freelist = c->freelist[c->offset];
+                c->freelist = get_freepointer(s, c->freelist);

                 /* And put onto the regular freelist */
-                object[c->offset] = page->freelist;
+                set_freepointer(s, object, page->freelist);
                 page->freelist = object;
                 page->inuse--;
         }
@@ -1495,7 +1482,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)

 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
-        stat(c, CPUSLAB_FLUSH);
+        stat(s, CPUSLAB_FLUSH);
         slab_lock(c->page);
         deactivate_slab(s, c);
 }
@@ -1507,7 +1494,7 @@ static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
  */
 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 {
-        struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);

         if (likely(c && c->page))
                 flush_slab(s, c);
@@ -1635,7 +1622,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
         if (unlikely(!node_match(c, node)))
                 goto another_slab;

-        stat(c, ALLOC_REFILL);
+        stat(s, ALLOC_REFILL);

 load_freelist:
         object = c->page->freelist;
@@ -1644,13 +1631,13 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
         if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
                 goto debug;

-        c->freelist = object[c->offset];
+        c->freelist = get_freepointer(s, object);
         c->page->inuse = c->page->objects;
         c->page->freelist = NULL;
         c->node = page_to_nid(c->page);
 unlock_out:
         slab_unlock(c->page);
-        stat(c, ALLOC_SLOWPATH);
+        stat(s, ALLOC_SLOWPATH);
         return object;

 another_slab:
@@ -1660,7 +1647,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
         new = get_partial(s, gfpflags, node);
         if (new) {
                 c->page = new;
-                stat(c, ALLOC_FROM_PARTIAL);
+                stat(s, ALLOC_FROM_PARTIAL);
                 goto load_freelist;
         }
@@ -1673,8 +1660,8 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
         local_irq_disable();

         if (new) {
-                c = get_cpu_slab(s, smp_processor_id());
-                stat(c, ALLOC_SLAB);
+                c = __this_cpu_ptr(s->cpu_slab);
+                stat(s, ALLOC_SLAB);
                 if (c->page)
                         flush_slab(s, c);
                 slab_lock(new);
@@ -1690,7 +1677,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                 goto another_slab;

         c->page->inuse++;
-        c->page->freelist = object[c->offset];
+        c->page->freelist = get_freepointer(s, object);
         c->node = -1;
         goto unlock_out;
 }
@@ -1711,35 +1698,33 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
         void **object;
         struct kmem_cache_cpu *c;
         unsigned long flags;
-        unsigned int objsize;

         gfpflags &= gfp_allowed_mask;

         lockdep_trace_alloc(gfpflags);
         might_sleep_if(gfpflags & __GFP_WAIT);

-        if (should_failslab(s->objsize, gfpflags))
+        if (should_failslab(s->objsize, gfpflags, s->flags))
                 return NULL;

         local_irq_save(flags);
-        c = get_cpu_slab(s, smp_processor_id());
-        objsize = c->objsize;
-        if (unlikely(!c->freelist || !node_match(c, node)))
+        c = __this_cpu_ptr(s->cpu_slab);
+        object = c->freelist;
+        if (unlikely(!object || !node_match(c, node)))

                 object = __slab_alloc(s, gfpflags, node, addr, c);

         else {
-                object = c->freelist;
-                c->freelist = object[c->offset];
-                stat(c, ALLOC_FASTPATH);
+                c->freelist = get_freepointer(s, object);
+                stat(s, ALLOC_FASTPATH);
         }
         local_irq_restore(flags);

         if (unlikely(gfpflags & __GFP_ZERO) && object)
-                memset(object, 0, objsize);
+                memset(object, 0, s->objsize);

-        kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
-        kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+        kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
+        kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);

         return object;
 }
@@ -1794,26 +1779,25 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-                        void *x, unsigned long addr, unsigned int offset)
+                        void *x, unsigned long addr)
 {
         void *prior;
         void **object = (void *)x;
-        struct kmem_cache_cpu *c;

-        c = get_cpu_slab(s, raw_smp_processor_id());
-        stat(c, FREE_SLOWPATH);
+        stat(s, FREE_SLOWPATH);
         slab_lock(page);

         if (unlikely(SLABDEBUG && PageSlubDebug(page)))
                 goto debug;

 checks_ok:
-        prior = object[offset] = page->freelist;
+        prior = page->freelist;
+        set_freepointer(s, object, prior);
         page->freelist = object;
         page->inuse--;

         if (unlikely(PageSlubFrozen(page))) {
-                stat(c, FREE_FROZEN);
+                stat(s, FREE_FROZEN);
                 goto out_unlock;
         }
@@ -1826,7 +1810,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
          */
         if (unlikely(!prior)) {
                 add_partial(get_node(s, page_to_nid(page)), page, 1);
-                stat(c, FREE_ADD_PARTIAL);
+                stat(s, FREE_ADD_PARTIAL);
         }

 out_unlock:
@@ -1839,10 +1823,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                  * Slab still on the partial list.
                  */
                 remove_partial(s, page);
-                stat(c, FREE_REMOVE_PARTIAL);
+                stat(s, FREE_REMOVE_PARTIAL);
         }
         slab_unlock(page);
-        stat(c, FREE_SLAB);
+        stat(s, FREE_SLAB);
         discard_slab(s, page);
         return;
@@ -1872,17 +1856,17 @@ static __always_inline void slab_free(struct kmem_cache *s,
         kmemleak_free_recursive(x, s->flags);
         local_irq_save(flags);
-        c = get_cpu_slab(s, smp_processor_id());
-        kmemcheck_slab_free(s, object, c->objsize);
-        debug_check_no_locks_freed(object, c->objsize);
+        c = __this_cpu_ptr(s->cpu_slab);
+        kmemcheck_slab_free(s, object, s->objsize);
+        debug_check_no_locks_freed(object, s->objsize);
         if (!(s->flags & SLAB_DEBUG_OBJECTS))
-                debug_check_no_obj_freed(object, c->objsize);
+                debug_check_no_obj_freed(object, s->objsize);
         if (likely(page == c->page && c->node >= 0)) {
-                object[c->offset] = c->freelist;
+                set_freepointer(s, object, c->freelist);
                 c->freelist = object;
-                stat(c, FREE_FASTPATH);
+                stat(s, FREE_FASTPATH);
         } else
-                __slab_free(s, page, x, addr, c->offset);
+                __slab_free(s, page, x, addr);

         local_irq_restore(flags);
 }
@@ -2069,19 +2053,6 @@ static unsigned long calculate_alignment(unsigned long flags,
         return ALIGN(align, sizeof(void *));
 }

-static void init_kmem_cache_cpu(struct kmem_cache *s,
-                        struct kmem_cache_cpu *c)
-{
-        c->page = NULL;
-        c->freelist = NULL;
-        c->node = 0;
-        c->offset = s->offset / sizeof(void *);
-        c->objsize = s->objsize;
-#ifdef CONFIG_SLUB_STATS
-        memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
-#endif
-}
-
 static void
 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
@@ -2095,130 +2066,24 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 #endif
 }

-#ifdef CONFIG_SMP
-/*
- * Per cpu array for per cpu structures.
- *
- * The per cpu array places all kmem_cache_cpu structures from one processor
- * close together meaning that it becomes possible that multiple per cpu
- * structures are contained in one cacheline. This may be particularly
- * beneficial for the kmalloc caches.
- *
- * A desktop system typically has around 60-80 slabs. With 100 here we are
- * likely able to get per cpu structures for all caches from the array defined
- * here. We must be able to cover all kmalloc caches during bootstrap.
- *
- * If the per cpu array is exhausted then fall back to kmalloc
- * of individual cachelines. No sharing is possible then.
- */
-#define NR_KMEM_CACHE_CPU 100
-
-static DEFINE_PER_CPU(struct kmem_cache_cpu [NR_KMEM_CACHE_CPU],
-                      kmem_cache_cpu);
-
-static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
-static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
-
-static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
-                                                        int cpu, gfp_t flags)
-{
-        struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
-
-        if (c)
-                per_cpu(kmem_cache_cpu_free, cpu) =
-                                (void *)c->freelist;
-        else {
-                /* Table overflow: So allocate ourselves */
-                c = kmalloc_node(
-                        ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
-                        flags, cpu_to_node(cpu));
-                if (!c)
-                        return NULL;
-        }
-
-        init_kmem_cache_cpu(s, c);
-        return c;
-}
-
-static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
-{
-        if (c < per_cpu(kmem_cache_cpu, cpu) ||
-                        c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
-                kfree(c);
-                return;
-        }
-        c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
-        per_cpu(kmem_cache_cpu_free, cpu) = c;
-}
-
-static void free_kmem_cache_cpus(struct kmem_cache *s)
-{
-        int cpu;
-
-        for_each_online_cpu(cpu) {
-                struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
-                if (c) {
-                        s->cpu_slab[cpu] = NULL;
-                        free_kmem_cache_cpu(c, cpu);
-                }
-        }
-}
-
-static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
-{
-        int cpu;
-
-        for_each_online_cpu(cpu) {
-                struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
-                if (c)
-                        continue;
-
-                c = alloc_kmem_cache_cpu(s, cpu, flags);
-                if (!c) {
-                        free_kmem_cache_cpus(s);
-                        return 0;
-                }
-                s->cpu_slab[cpu] = c;
-        }
-        return 1;
-}
-
-/*
- * Initialize the per cpu array.
- */
-static void init_alloc_cpu_cpu(int cpu)
-{
-        int i;
-
-        if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
-                return;
-
-        for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
-                free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
-
-        cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
-}
-
-static void __init init_alloc_cpu(void)
-{
-        int cpu;
-
-        for_each_online_cpu(cpu)
-                init_alloc_cpu_cpu(cpu);
-}
-
-#else
-static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
-static inline void init_alloc_cpu(void) {}
+static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]);

 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 {
-        init_kmem_cache_cpu(s, &s->cpu_slab);
+        if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
+                /*
+                 * Boot time creation of the kmalloc array. Use static per cpu data
+                 * since the per cpu allocator is not available yet.
+                 */
+                s->cpu_slab = per_cpu_var(kmalloc_percpu) + (s - kmalloc_caches);
+        else
+                s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
+
+        if (!s->cpu_slab)
+                return 0;
+
         return 1;
 }
-#endif

 #ifdef CONFIG_NUMA
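The boot-time branch above leans on plain pointer arithmetic: for a cache that lives inside the static kmalloc_caches[] array, s - kmalloc_caches is its index, so each boot cache gets the matching slot of the static kmalloc_percpu[] area before alloc_percpu() is usable. A small standalone illustration of that indexing; the struct fields here are stand-ins, not the kernel's:

    /* Pointer-difference indexing as used for the boot-time kmalloc caches. */
    #include <stdio.h>

    #define KMALLOC_CACHES 22                   /* example value, see the slub_def.h hunk */

    struct kmem_cache     { int dummy; };       /* stand-in fields */
    struct kmem_cache_cpu { int dummy; };

    static struct kmem_cache     kmalloc_caches[KMALLOC_CACHES];
    static struct kmem_cache_cpu kmalloc_percpu[KMALLOC_CACHES];

    int main(void)
    {
        struct kmem_cache *s = &kmalloc_caches[7];
        struct kmem_cache_cpu *cpu_slab;

        /* Same shape as: s->cpu_slab = per_cpu_var(kmalloc_percpu) + (s - kmalloc_caches); */
        cpu_slab = kmalloc_percpu + (s - kmalloc_caches);

        printf("cache index %td -> percpu slot %td\n",
               s - kmalloc_caches, cpu_slab - kmalloc_percpu);
        return 0;
    }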
@@ -2287,7 +2152,8 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
         int node;
         int local_node;

-        if (slab_state >= UP)
+        if (slab_state >= UP && (s < kmalloc_caches ||
+                        s > kmalloc_caches + KMALLOC_CACHES))
                 local_node = page_to_nid(virt_to_page(s));
         else
                 local_node = 0;
@@ -2502,6 +2368,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,

         if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
                 return 1;
+
         free_kmem_cache_nodes(s);
 error:
         if (flags & SLAB_PANIC)
@@ -2609,9 +2476,8 @@ static inline int kmem_cache_close(struct kmem_cache *s)
         int node;

         flush_all(s);
-
+        free_percpu(s->cpu_slab);
         /* Attempt to free all objects */
-        free_kmem_cache_cpus(s);
         for_each_node_state(node, N_NORMAL_MEMORY) {
                 struct kmem_cache_node *n = get_node(s, node);
@@ -2651,7 +2517,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *              Kmalloc subsystem
  *******************************************************************/

-struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);

 static int __init setup_slub_min_order(char *str)
@@ -2741,6 +2607,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
         char *text;
         size_t realsize;
         unsigned long slabflags;
+        int i;

         s = kmalloc_caches_dma[index];
         if (s)
@@ -2760,7 +2627,14 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
         realsize = kmalloc_caches[index].objsize;
         text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
                          (unsigned int)realsize);
-        s = kmalloc(kmem_size, flags & ~SLUB_DMA);
+
+        s = NULL;
+        for (i = 0; i < KMALLOC_CACHES; i++)
+                if (!kmalloc_caches[i].size)
+                        break;
+
+        BUG_ON(i >= KMALLOC_CACHES);
+        s = kmalloc_caches + i;

         /*
          * Must defer sysfs creation to a workqueue because we don't know
@@ -2772,9 +2646,9 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
         if (slab_state >= SYSFS)
                 slabflags |= __SYSFS_ADD_DEFERRED;

-        if (!s || !text || !kmem_cache_open(s, flags, text,
+        if (!text || !kmem_cache_open(s, flags, text,
                         realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
-                kfree(s);
+                s->size = 0;
                 kfree(text);
                 goto unlock_out;
         }
@@ -3176,8 +3050,6 @@ void __init kmem_cache_init(void)
         int i;
         int caches = 0;

-        init_alloc_cpu();
-
 #ifdef CONFIG_NUMA
         /*
          * Must first have the slab cache available for the allocations of the
@@ -3261,8 +3133,10 @@ void __init kmem_cache_init(void)
 #ifdef CONFIG_SMP
         register_cpu_notifier(&slab_notifier);
-        kmem_size = offsetof(struct kmem_cache, cpu_slab) +
-                                nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
+#endif
+#ifdef CONFIG_NUMA
+        kmem_size = offsetof(struct kmem_cache, node) +
+                                nr_node_ids * sizeof(struct kmem_cache_node *);
 #else
         kmem_size = sizeof(struct kmem_cache);
 #endif
@@ -3351,22 +3225,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
         down_write(&slub_lock);
         s = find_mergeable(size, align, flags, name, ctor);
         if (s) {
-                int cpu;
-
                 s->refcount++;
                 /*
                  * Adjust the object sizes so that we clear
                  * the complete object on kzalloc.
                  */
                 s->objsize = max(s->objsize, (int)size);
-
-                /*
-                 * And then we need to update the object size in the
-                 * per cpu structures
-                 */
-                for_each_online_cpu(cpu)
-                        get_cpu_slab(s, cpu)->objsize = s->objsize;
-
                 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
                 up_write(&slub_lock);
@@ -3420,29 +3284,15 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
         unsigned long flags;

         switch (action) {
-        case CPU_UP_PREPARE:
-        case CPU_UP_PREPARE_FROZEN:
-                init_alloc_cpu_cpu(cpu);
-                down_read(&slub_lock);
-                list_for_each_entry(s, &slab_caches, list)
-                        s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
-                                                        GFP_KERNEL);
-                up_read(&slub_lock);
-                break;
-
         case CPU_UP_CANCELED:
         case CPU_UP_CANCELED_FROZEN:
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
                 down_read(&slub_lock);
                 list_for_each_entry(s, &slab_caches, list) {
-                        struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
                         local_irq_save(flags);
                         __flush_cpu_slab(s, cpu);
                         local_irq_restore(flags);
-                        free_kmem_cache_cpu(c, cpu);
-                        s->cpu_slab[cpu] = NULL;
                 }
                 up_read(&slub_lock);
                 break;
@@ -3928,7 +3778,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                 int cpu;

                 for_each_possible_cpu(cpu) {
-                        struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+                        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);

                         if (!c || c->node < 0)
                                 continue;
@@ -4171,6 +4021,23 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
 }
 SLAB_ATTR(trace);

+#ifdef CONFIG_FAILSLAB
+static ssize_t failslab_show(struct kmem_cache *s, char *buf)
+{
+        return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
+}
+
+static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
+                                                        size_t length)
+{
+        s->flags &= ~SLAB_FAILSLAB;
+        if (buf[0] == '1')
+                s->flags |= SLAB_FAILSLAB;
+        return length;
+}
+SLAB_ATTR(failslab);
+#endif
+
 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
 {
         return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
@@ -4353,7 +4220,7 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
                 return -ENOMEM;

         for_each_online_cpu(cpu) {
-                unsigned x = get_cpu_slab(s, cpu)->stat[si];
+                unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];

                 data[cpu] = x;
                 sum += x;
@@ -4376,7 +4243,7 @@ static void clear_stat(struct kmem_cache *s, enum stat_item si)
         int cpu;

         for_each_online_cpu(cpu)
-                get_cpu_slab(s, cpu)->stat[si] = 0;
+                per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
 }

 #define STAT_ATTR(si, text)                                     \
@@ -4467,6 +4334,10 @@ static struct attribute *slab_attrs[] = {
         &deactivate_remote_frees_attr.attr,
         &order_fallback_attr.attr,
 #endif
+#ifdef CONFIG_FAILSLAB
+        &failslab_attr.attr,
+#endif
+
         NULL,
 };