commit 9ce70c02 authored by Hugh Dickins, committed by Linus Torvalds

memcg: fix deadlock by inverting lrucare nesting

We have forgotten the rules of lock nesting: the irq-safe ones must be
taken inside the non-irq-safe ones, otherwise we are open to deadlock:

CPU0                          CPU1
----                          ----
lock(&(&pc->lock)->rlock);
                              local_irq_disable();
                              lock(&(&zone->lru_lock)->rlock);
                              lock(&(&pc->lock)->rlock);
<Interrupt>
lock(&(&zone->lru_lock)->rlock);
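
For illustration only (not part of the patch), the same ABBA pattern can be
sketched with ordinary kernel spinlocks; pc_lock and lru_lock below are
stand-ins for memcg's pc->lock (really a bit_spin_lock) and zone->lru_lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(pc_lock);	/* stand-in for pc->lock */
static DEFINE_SPINLOCK(lru_lock);	/* stand-in for zone->lru_lock */

/* CPU1, the old lrucare path: irq-safe lru_lock first, pc_lock inside. */
static void cpu1_path(void)
{
	spin_lock_irq(&lru_lock);	/* disables irqs, takes lru_lock */
	spin_lock(&pc_lock);		/* spins while CPU0 holds pc_lock */
	spin_unlock(&pc_lock);
	spin_unlock_irq(&lru_lock);
}

/* CPU0: pc_lock is taken with irqs still enabled.  An interrupt arriving
 * here that needs lru_lock spins on CPU1, which spins on pc_lock held by
 * this CPU: deadlock. */
static void cpu0_path(void)
{
	spin_lock(&pc_lock);
	/* <Interrupt>: handler tries lock(&lru_lock) */
	spin_unlock(&pc_lock);
}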

To check a different locking issue, I happened to add a spin_lock to
memcg's bit_spin_lock in lock_page_cgroup(), and lockdep very quickly
complained about __mem_cgroup_commit_charge_lrucare() (on CPU1 above).

So delete __mem_cgroup_commit_charge_lrucare(), passing a bool lrucare to
__mem_cgroup_commit_charge() instead, taking zone->lru_lock under
lock_page_cgroup() in the lrucare case.

The original was using spin_lock_irqsave, but we'd be in more trouble if
it were ever called at interrupt time: unconditional _irq is enough.  And
ClearPageLRU before del from lru, SetPageLRU before add to lru: no strong
reason, but that is the ordering used consistently elsewhere.
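
(For reference, a hedged sketch of the two locking variants, not patch code:
spin_lock_irqsave() saves and restores the caller's irq state, so it is safe
even if irqs were already disabled; spin_lock_irq() unconditionally disables
irqs on lock and re-enables them on unlock, so it is only correct from
process context -- which is all the commit path needs.)

	unsigned long flags;

	/* old helper: tolerant of any irq state, at the cost of flags */
	spin_lock_irqsave(&zone->lru_lock, flags);
	spin_unlock_irqrestore(&zone->lru_lock, flags);

	/* new code: never called at interrupt time, unconditional is enough */
	spin_lock_irq(&zone->lru_lock);
	spin_unlock_irq(&zone->lru_lock);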

Fixes 36b62ad5 ("memcg: simplify corner case handling
of LRU").
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Konstantin Khlebnikov <khlebnikov@openvz.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 73737b87
@@ -2408,8 +2408,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
                                        struct page *page,
                                        unsigned int nr_pages,
                                        struct page_cgroup *pc,
-                                       enum charge_type ctype)
+                                       enum charge_type ctype,
+                                       bool lrucare)
 {
+	struct zone *uninitialized_var(zone);
+	bool was_on_lru = false;
+
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
 		unlock_page_cgroup(pc);
@@ -2420,6 +2424,21 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	 * we don't need page_cgroup_lock about tail pages, becase they are not
 	 * accessed by any other context at this point.
 	 */
+
+	/*
+	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
+	 * may already be on some other mem_cgroup's LRU.  Take care of it.
+	 */
+	if (lrucare) {
+		zone = page_zone(page);
+		spin_lock_irq(&zone->lru_lock);
+		if (PageLRU(page)) {
+			ClearPageLRU(page);
+			del_page_from_lru_list(zone, page, page_lru(page));
+			was_on_lru = true;
+		}
+	}
+
 	pc->mem_cgroup = memcg;
 	/*
 	 * We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2443,9 +2462,18 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 		break;
 	}
 
+	if (lrucare) {
+		if (was_on_lru) {
+			VM_BUG_ON(PageLRU(page));
+			SetPageLRU(page);
+			add_page_to_lru_list(zone, page, page_lru(page));
+		}
+		spin_unlock_irq(&zone->lru_lock);
+	}
+
 	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
 	unlock_page_cgroup(pc);
-	WARN_ON_ONCE(PageLRU(page));
+
 	/*
 	 * "charge_statistics" updated event counter. Then, check it.
 	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
@@ -2643,7 +2671,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 	if (ret == -ENOMEM)
 		return ret;
-	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
 	return 0;
 }
 
@@ -2663,35 +2691,6 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
 				  enum charge_type ctype);
 
-static void
-__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
-					enum charge_type ctype)
-{
-	struct page_cgroup *pc = lookup_page_cgroup(page);
-	struct zone *zone = page_zone(page);
-	unsigned long flags;
-	bool removed = false;
-
-	/*
-	 * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
-	 * is already on LRU. It means the page may on some other page_cgroup's
-	 * LRU. Take care of it.
-	 */
-	spin_lock_irqsave(&zone->lru_lock, flags);
-	if (PageLRU(page)) {
-		del_page_from_lru_list(zone, page, page_lru(page));
-		ClearPageLRU(page);
-		removed = true;
-	}
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
-	if (removed) {
-		add_page_to_lru_list(zone, page, page_lru(page));
-		SetPageLRU(page);
-	}
-	spin_unlock_irqrestore(&zone->lru_lock, flags);
-	return;
-}
-
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 			    gfp_t gfp_mask)
 {
@@ -2769,13 +2768,16 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
 				  enum charge_type ctype)
 {
+	struct page_cgroup *pc;
+
 	if (mem_cgroup_disabled())
 		return;
 	if (!memcg)
 		return;
 	cgroup_exclude_rmdir(&memcg->css);
 
-	__mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
+	pc = lookup_page_cgroup(page);
+	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
 	/*
 	 * Now swap is on-memory. This means this page may be
 	 * counted both as mem and swap....double count.
@@ -3248,7 +3250,7 @@ int mem_cgroup_prepare_migration(struct page *page,
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
 	return ret;
 }
 
@@ -3332,7 +3334,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
 	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
 	 * LRU while we overwrite pc->mem_cgroup.
 	 */
-	__mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
 }
 
 #ifdef CONFIG_DEBUG_VM