diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f5746b4c5a3f9cdb117c165df486d3fc81df68f5..d85b5a0bfda63d67f29fc6977708bd12a20577d1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -593,6 +593,7 @@ static void bad_page(struct page *page, const char *reason,
 
 void free_compound_page(struct page *page)
 {
+	mem_cgroup_uncharge(page);
 	__free_pages_ok(page, compound_order(page));
 }
 
diff --git a/mm/swap.c b/mm/swap.c
index bdb9b294afbf746db62271405569add1f74838bc..002c98a815555393022e47d4470ea9996eda2b8c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -71,12 +71,12 @@ static void __page_cache_release(struct page *page)
 		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 	}
 	__ClearPageWaiters(page);
-	mem_cgroup_uncharge(page);
 }
 
 static void __put_single_page(struct page *page)
 {
 	__page_cache_release(page);
+	mem_cgroup_uncharge(page);
 	free_unref_page(page);
 }
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f49c11a6cff37c75b8bea70db23daa5e712d6595..497df95dd7b8fc40d30844fc3c598b83d233e9b6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1475,10 +1475,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * Is there need to periodically free_page_list? It would
 		 * appear not as the counts should be low
 		 */
-		if (unlikely(PageTransHuge(page))) {
-			mem_cgroup_uncharge(page);
+		if (unlikely(PageTransHuge(page)))
 			(*get_compound_page_dtor(page))(page);
-		} else
+		else
 			list_add(&page->lru, &free_pages);
 
 		continue;
@@ -1876,7 +1875,6 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 		if (unlikely(PageCompound(page))) {
 			spin_unlock_irq(&pgdat->lru_lock);
-			mem_cgroup_uncharge(page);
 			(*get_compound_page_dtor(page))(page);
 			spin_lock_irq(&pgdat->lru_lock);
 		} else