提交 7a7c381d 编写于 作者: C Christoph Hellwig 提交者: Linus Torvalds

[PATCH] slab: stop using list_for_each

Use the _entry variant everywhere to clean the code up a tiny bit.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
上级 e1b6aa6f
...@@ -1950,8 +1950,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, ...@@ -1950,8 +1950,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
void (*dtor)(void*, struct kmem_cache *, unsigned long)) void (*dtor)(void*, struct kmem_cache *, unsigned long))
{ {
size_t left_over, slab_size, ralign; size_t left_over, slab_size, ralign;
struct kmem_cache *cachep = NULL; struct kmem_cache *cachep = NULL, *pc;
struct list_head *p;
/* /*
* Sanity checks... these are all serious usage bugs. * Sanity checks... these are all serious usage bugs.
...@@ -1971,8 +1970,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, ...@@ -1971,8 +1970,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
mutex_lock(&cache_chain_mutex); mutex_lock(&cache_chain_mutex);
list_for_each(p, &cache_chain) { list_for_each_entry(pc, &cache_chain, next) {
struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
mm_segment_t old_fs = get_fs(); mm_segment_t old_fs = get_fs();
char tmp; char tmp;
int res; int res;
...@@ -3690,7 +3688,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, ...@@ -3690,7 +3688,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
*/ */
static void cache_reap(void *unused) static void cache_reap(void *unused)
{ {
struct list_head *walk; struct kmem_cache *searchp;
struct kmem_list3 *l3; struct kmem_list3 *l3;
int node = numa_node_id(); int node = numa_node_id();
...@@ -3701,13 +3699,11 @@ static void cache_reap(void *unused) ...@@ -3701,13 +3699,11 @@ static void cache_reap(void *unused)
return; return;
} }
list_for_each(walk, &cache_chain) { list_for_each_entry(searchp, &cache_chain, next) {
struct kmem_cache *searchp;
struct list_head *p; struct list_head *p;
int tofree; int tofree;
struct slab *slabp; struct slab *slabp;
searchp = list_entry(walk, struct kmem_cache, next);
check_irq_on(); check_irq_on();
/* /*
...@@ -3835,7 +3831,6 @@ static void s_stop(struct seq_file *m, void *p) ...@@ -3835,7 +3831,6 @@ static void s_stop(struct seq_file *m, void *p)
static int s_show(struct seq_file *m, void *p) static int s_show(struct seq_file *m, void *p)
{ {
struct kmem_cache *cachep = p; struct kmem_cache *cachep = p;
struct list_head *q;
struct slab *slabp; struct slab *slabp;
unsigned long active_objs; unsigned long active_objs;
unsigned long num_objs; unsigned long num_objs;
...@@ -3856,15 +3851,13 @@ static int s_show(struct seq_file *m, void *p) ...@@ -3856,15 +3851,13 @@ static int s_show(struct seq_file *m, void *p)
check_irq_on(); check_irq_on();
spin_lock_irq(&l3->list_lock); spin_lock_irq(&l3->list_lock);
list_for_each(q, &l3->slabs_full) { list_for_each_entry(slabp, &l3->slabs_full, list) {
slabp = list_entry(q, struct slab, list);
if (slabp->inuse != cachep->num && !error) if (slabp->inuse != cachep->num && !error)
error = "slabs_full accounting error"; error = "slabs_full accounting error";
active_objs += cachep->num; active_objs += cachep->num;
active_slabs++; active_slabs++;
} }
list_for_each(q, &l3->slabs_partial) { list_for_each_entry(slabp, &l3->slabs_partial, list) {
slabp = list_entry(q, struct slab, list);
if (slabp->inuse == cachep->num && !error) if (slabp->inuse == cachep->num && !error)
error = "slabs_partial inuse accounting error"; error = "slabs_partial inuse accounting error";
if (!slabp->inuse && !error) if (!slabp->inuse && !error)
...@@ -3872,8 +3865,7 @@ static int s_show(struct seq_file *m, void *p) ...@@ -3872,8 +3865,7 @@ static int s_show(struct seq_file *m, void *p)
active_objs += slabp->inuse; active_objs += slabp->inuse;
active_slabs++; active_slabs++;
} }
list_for_each(q, &l3->slabs_free) { list_for_each_entry(slabp, &l3->slabs_free, list) {
slabp = list_entry(q, struct slab, list);
if (slabp->inuse && !error) if (slabp->inuse && !error)
error = "slabs_free/inuse accounting error"; error = "slabs_free/inuse accounting error";
num_slabs++; num_slabs++;
...@@ -3966,7 +3958,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer, ...@@ -3966,7 +3958,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
{ {
char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
int limit, batchcount, shared, res; int limit, batchcount, shared, res;
struct list_head *p; struct kmem_cache *cachep;
if (count > MAX_SLABINFO_WRITE) if (count > MAX_SLABINFO_WRITE)
return -EINVAL; return -EINVAL;
...@@ -3985,10 +3977,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer, ...@@ -3985,10 +3977,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
/* Find the cache in the chain of caches. */ /* Find the cache in the chain of caches. */
mutex_lock(&cache_chain_mutex); mutex_lock(&cache_chain_mutex);
res = -EINVAL; res = -EINVAL;
list_for_each(p, &cache_chain) { list_for_each_entry(cachep, &cache_chain, next) {
struct kmem_cache *cachep;
cachep = list_entry(p, struct kmem_cache, next);
if (!strcmp(cachep->name, kbuf)) { if (!strcmp(cachep->name, kbuf)) {
if (limit < 1 || batchcount < 1 || if (limit < 1 || batchcount < 1 ||
batchcount > limit || shared < 0) { batchcount > limit || shared < 0) {
...@@ -4090,7 +4079,6 @@ static void show_symbol(struct seq_file *m, unsigned long address) ...@@ -4090,7 +4079,6 @@ static void show_symbol(struct seq_file *m, unsigned long address)
static int leaks_show(struct seq_file *m, void *p) static int leaks_show(struct seq_file *m, void *p)
{ {
struct kmem_cache *cachep = p; struct kmem_cache *cachep = p;
struct list_head *q;
struct slab *slabp; struct slab *slabp;
struct kmem_list3 *l3; struct kmem_list3 *l3;
const char *name; const char *name;
...@@ -4115,14 +4103,10 @@ static int leaks_show(struct seq_file *m, void *p) ...@@ -4115,14 +4103,10 @@ static int leaks_show(struct seq_file *m, void *p)
check_irq_on(); check_irq_on();
spin_lock_irq(&l3->list_lock); spin_lock_irq(&l3->list_lock);
list_for_each(q, &l3->slabs_full) { list_for_each_entry(slabp, &l3->slabs_full, list)
slabp = list_entry(q, struct slab, list);
handle_slab(n, cachep, slabp); handle_slab(n, cachep, slabp);
} list_for_each_entry(slabp, &l3->slabs_partial, list)
list_for_each(q, &l3->slabs_partial) {
slabp = list_entry(q, struct slab, list);
handle_slab(n, cachep, slabp); handle_slab(n, cachep, slabp);
}
spin_unlock_irq(&l3->list_lock); spin_unlock_irq(&l3->list_lock);
} }
name = cachep->name; name = cachep->name;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册