Commit 62e346a8 authored by Christoph Lameter, committed by Pekka Enberg

slub: extract common code to remove objects from partial list without locking

There are a couple of places where we repeat the same statements when
removing a page from the partial list. Consolidate that into __remove_partial().
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Parent f7cb1933
@@ -1310,13 +1310,19 @@ static void add_partial(struct kmem_cache_node *n,
         spin_unlock(&n->list_lock);
 }
 
+static inline void __remove_partial(struct kmem_cache_node *n,
+                                        struct page *page)
+{
+        list_del(&page->lru);
+        n->nr_partial--;
+}
+
 static void remove_partial(struct kmem_cache *s, struct page *page)
 {
         struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
         spin_lock(&n->list_lock);
-        list_del(&page->lru);
-        n->nr_partial--;
+        __remove_partial(n, page);
         spin_unlock(&n->list_lock);
 }
 
@@ -1329,8 +1335,7 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
                 struct page *page)
 {
         if (slab_trylock(page)) {
-                list_del(&page->lru);
-                n->nr_partial--;
+                __remove_partial(n, page);
                 __SetPageSlubFrozen(page);
                 return 1;
         }
@@ -2462,9 +2467,8 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
         spin_lock_irqsave(&n->list_lock, flags);
         list_for_each_entry_safe(page, h, &n->partial, lru) {
                 if (!page->inuse) {
-                        list_del(&page->lru);
+                        __remove_partial(n, page);
                         discard_slab(s, page);
-                        n->nr_partial--;
                 } else {
                         list_slab_objects(s, page,
                                 "Objects remaining on kmem_cache_close()");
@@ -2822,8 +2826,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
                                  * may have freed the last object and be
                                  * waiting to release the slab.
                                  */
-                                list_del(&page->lru);
-                                n->nr_partial--;
+                                __remove_partial(n, page);
                                 slab_unlock(page);
                                 discard_slab(s, page);
                         } else {
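A note on the pattern, with a minimal sketch: __remove_partial() deliberately leaves out the spin_lock()/spin_unlock() pair, because the call sites are either taking n->list_lock themselves, as remove_partial() does above, or are reached with the lock already held, as in free_partial(), which takes it with spin_lock_irqsave() around the whole loop. The sketch below is a hypothetical userspace analogue of that split, not the kernel code; page_stub, node_stub, and the hand-built circular list are illustrative stand-ins for struct page, struct kmem_cache_node, and the list_head machinery.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct page and struct kmem_cache_node. */
struct page_stub {
        struct page_stub *prev, *next;          /* plays the role of page->lru */
};

struct node_stub {
        pthread_mutex_t list_lock;              /* plays the role of n->list_lock */
        struct page_stub partial;               /* circular list head */
        unsigned long nr_partial;
};

/* Common code: the caller must already hold n->list_lock. */
static inline void __remove_partial(struct node_stub *n, struct page_stub *page)
{
        page->prev->next = page->next;          /* list_del(&page->lru) */
        page->next->prev = page->prev;
        n->nr_partial--;
}

/* Locked wrapper, analogous to remove_partial() after this change. */
static void remove_partial(struct node_stub *n, struct page_stub *page)
{
        pthread_mutex_lock(&n->list_lock);
        __remove_partial(n, page);
        pthread_mutex_unlock(&n->list_lock);
}

int main(void)
{
        struct node_stub n = { .list_lock = PTHREAD_MUTEX_INITIALIZER };
        struct page_stub p;

        /* One-entry circular partial list: head <-> p. */
        n.partial.next = n.partial.prev = &p;
        p.next = p.prev = &n.partial;
        n.nr_partial = 1;

        remove_partial(&n, &p);
        printf("nr_partial = %lu\n", n.nr_partial);     /* prints 0 */
        return 0;
}

Build with gcc -pthread; the point is only that the unlocked helper can be shared by a locked wrapper and by paths that already hold the lock, without double-locking.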