Commit 2262185c, authored by Roman Gushchin, committed by Linus Torvalds

mm: per-cgroup memory reclaim stats

Track the following reclaim counters for every memory cgroup: PGREFILL,
PGSCAN, PGSTEAL, PGACTIVATE, PGDEACTIVATE, PGLAZYFREE and PGLAZYFREED.

These values are exposed using the memory.stat interface of cgroup v2.

The meaning of each value is the same as for the corresponding global
counters, which are available in /proc/vmstat.
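
For illustration, the new entries appear in memory.stat in the
"<counter> <value>" format used by the rest of the file (the counter
values shown here are arbitrary, made-up numbers):

  pgrefill 3456
  pgscan 12789
  pgsteal 9871
  pgactivate 2451
  pgdeactivate 1982
  pglazyfree 110
  pglazyfreed 34

pgscan and pgsteal each aggregate the kswapd and direct-reclaim
variants of the corresponding global counters.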

Also, for consistency, rename mem_cgroup_count_vm_event() to
count_memcg_event_mm().

Link: http://lkml.kernel.org/r/1494530183-30808-1-git-send-email-guro@fb.com
Signed-off-by: Roman Gushchin <guro@fb.com>
Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Parent: 94f4a161
@@ -956,6 +956,34 @@ PAGE_SIZE multiple when read back.
 		Number of times a shadow node has been reclaimed
 
+	  pgrefill
+
+		Amount of scanned pages (in an active LRU list)
+
+	  pgscan
+
+		Amount of scanned pages (in an inactive LRU list)
+
+	  pgsteal
+
+		Amount of reclaimed pages
+
+	  pgactivate
+
+		Amount of pages moved to the active LRU list
+
+	  pgdeactivate
+
+		Amount of pages moved to the inactive LRU list
+
+	  pglazyfree
+
+		Amount of pages postponed to be freed under memory pressure
+
+	  pglazyfreed
+
+		Amount of reclaimed lazyfree pages
+
   memory.swap.current
 
 	A read-only single value file which exists on non-root
...
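
As a usage sketch (not part of this patch): a minimal userspace reader
that prints the new counters for one cgroup. The cgroup v2 mount point
and the cgroup name are illustrative assumptions.

    /* read_reclaim_stats.c: hypothetical example; assumes cgroup v2 is
     * mounted at /sys/fs/cgroup and that the cgroup "example" exists.
     */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *path = "/sys/fs/cgroup/example/memory.stat";
            char key[64];
            unsigned long long val;
            FILE *f = fopen(path, "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* memory.stat is a flat list of "name value" lines */
            while (fscanf(f, "%63s %llu", key, &val) == 2) {
                    /* print only the counters added by this patch */
                    if (!strcmp(key, "pgrefill") || !strcmp(key, "pgscan") ||
                        !strcmp(key, "pgsteal") || !strcmp(key, "pgactivate") ||
                        !strcmp(key, "pgdeactivate") ||
                        !strcmp(key, "pglazyfree") || !strcmp(key, "pglazyfreed"))
                            printf("%s = %llu\n", key, val);
            }
            fclose(f);
            return 0;
    }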
@@ -1213,7 +1213,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
 	case IOMAP_MAPPED:
 		if (iomap.flags & IOMAP_F_NEW) {
 			count_vm_event(PGMAJFAULT);
-			mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
+			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
 			major = VM_FAULT_MAJOR;
 		}
 		error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
...
@@ -89,7 +89,7 @@ static int ncp_file_mmap_fault(struct vm_fault *vmf)
 	 * -- nyc
 	 */
 	count_vm_event(PGMAJFAULT);
-	mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
+	count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
 	return VM_FAULT_MAJOR;
 }
...
@@ -357,6 +357,17 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
 }
 
 struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
 
+static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
+{
+	struct mem_cgroup_per_node *mz;
+
+	if (mem_cgroup_disabled())
+		return NULL;
+
+	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+	return mz->memcg;
+}
+
 /**
  * parent_mem_cgroup - find the accounting parent of a memcg
  * @memcg: memcg whose parent to find
@@ -546,7 +557,22 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 					    gfp_t gfp_mask,
 					    unsigned long *total_scanned);
 
-static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
+static inline void count_memcg_events(struct mem_cgroup *memcg,
+				      enum vm_event_item idx,
+				      unsigned long count)
+{
+	if (!mem_cgroup_disabled())
+		this_cpu_add(memcg->stat->events[idx], count);
+}
+
+static inline void count_memcg_page_event(struct page *page,
+					  enum memcg_stat_item idx)
+{
+	if (page->mem_cgroup)
+		count_memcg_events(page->mem_cgroup, idx, 1);
+}
+
+static inline void count_memcg_event_mm(struct mm_struct *mm,
 					enum vm_event_item idx)
 {
 	struct mem_cgroup *memcg;
@@ -675,6 +701,11 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
 	return NULL;
 }
 
+static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
+{
+	return NULL;
+}
+
 static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
 {
 	return true;
@@ -789,8 +820,19 @@ static inline void mem_cgroup_split_huge_fixup(struct page *head)
 {
 }
 
+static inline void count_memcg_events(struct mem_cgroup *memcg,
+				      enum vm_event_item idx,
+				      unsigned long count)
+{
+}
+
+static inline void count_memcg_page_event(struct page *page,
+					  enum memcg_stat_item idx)
+{
+}
+
 static inline
-void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
+void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
 {
 }
 #endif /* CONFIG_MEMCG */
...
@@ -2265,7 +2265,7 @@ int filemap_fault(struct vm_fault *vmf)
 		/* No page in the page cache at all */
 		do_sync_mmap_readahead(vmf->vma, ra, file, offset);
 		count_vm_event(PGMAJFAULT);
-		mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
+		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
 		ret = VM_FAULT_MAJOR;
 retry_find:
 		page = find_get_page(mapping, offset);
...
@@ -5230,6 +5230,16 @@ static int memory_stat_show(struct seq_file *m, void *v)
 	seq_printf(m, "pgfault %lu\n", events[PGFAULT]);
 	seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]);
 
+	seq_printf(m, "pgrefill %lu\n", events[PGREFILL]);
+	seq_printf(m, "pgscan %lu\n", events[PGSCAN_KSWAPD] +
+		   events[PGSCAN_DIRECT]);
+	seq_printf(m, "pgsteal %lu\n", events[PGSTEAL_KSWAPD] +
+		   events[PGSTEAL_DIRECT]);
+	seq_printf(m, "pgactivate %lu\n", events[PGACTIVATE]);
+	seq_printf(m, "pgdeactivate %lu\n", events[PGDEACTIVATE]);
+	seq_printf(m, "pglazyfree %lu\n", events[PGLAZYFREE]);
+	seq_printf(m, "pglazyfreed %lu\n", events[PGLAZYFREED]);
+
 	seq_printf(m, "workingset_refault %lu\n",
 		   stat[WORKINGSET_REFAULT]);
 	seq_printf(m, "workingset_activate %lu\n",
...
@@ -2719,7 +2719,7 @@ int do_swap_page(struct vm_fault *vmf)
 		/* Had to read the page from swap area: Major fault */
 		ret = VM_FAULT_MAJOR;
 		count_vm_event(PGMAJFAULT);
-		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
 	} else if (PageHWPoison(page)) {
 		/*
 		 * hwpoisoned dirty swapcache pages are kept for killing
@@ -3837,7 +3837,7 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 	__set_current_state(TASK_RUNNING);
 
 	count_vm_event(PGFAULT);
-	mem_cgroup_count_vm_event(vma->vm_mm, PGFAULT);
+	count_memcg_event_mm(vma->vm_mm, PGFAULT);
 
 	/* do counter updates before entering really critical section. */
 	check_sync_rss_stat(current);
...
@@ -1646,8 +1646,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		if (fault_type) {
 			*fault_type |= VM_FAULT_MAJOR;
 			count_vm_event(PGMAJFAULT);
-			mem_cgroup_count_vm_event(charge_mm,
-						  PGMAJFAULT);
+			count_memcg_event_mm(charge_mm, PGMAJFAULT);
 		}
 		/* Here we actually start the io */
 		page = shmem_swapin(swap, gfp, info, index);
...
@@ -591,6 +591,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
 		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
+		count_memcg_page_event(page, PGLAZYFREE);
 		update_page_reclaim_stat(lruvec, 1, 0);
 	}
 }
...
@@ -1294,6 +1294,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			}
 
 			count_vm_event(PGLAZYFREED);
+			count_memcg_page_event(page, PGLAZYFREED);
 		} else if (!mapping || !__remove_mapping(mapping, page, true))
 			goto keep_locked;
 
 		/*
@@ -1323,6 +1324,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (!PageMlocked(page)) {
 			SetPageActive(page);
 			pgactivate++;
+			count_memcg_page_event(page, PGACTIVATE);
 		}
 keep_locked:
 		unlock_page(page);
@@ -1762,11 +1764,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
-	if (global_reclaim(sc)) {
-		if (current_is_kswapd())
+	if (current_is_kswapd()) {
+		if (global_reclaim(sc))
 			__count_vm_events(PGSCAN_KSWAPD, nr_scanned);
-		else
+		count_memcg_events(lruvec_memcg(lruvec), PGSCAN_KSWAPD,
+				   nr_scanned);
+	} else {
+		if (global_reclaim(sc))
 			__count_vm_events(PGSCAN_DIRECT, nr_scanned);
+		count_memcg_events(lruvec_memcg(lruvec), PGSCAN_DIRECT,
+				   nr_scanned);
 	}
 	spin_unlock_irq(&pgdat->lru_lock);
@@ -1778,11 +1785,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 
 	spin_lock_irq(&pgdat->lru_lock);
 
-	if (global_reclaim(sc)) {
-		if (current_is_kswapd())
+	if (current_is_kswapd()) {
+		if (global_reclaim(sc))
 			__count_vm_events(PGSTEAL_KSWAPD, nr_reclaimed);
-		else
+		count_memcg_events(lruvec_memcg(lruvec), PGSTEAL_KSWAPD,
+				   nr_reclaimed);
+	} else {
+		if (global_reclaim(sc))
 			__count_vm_events(PGSTEAL_DIRECT, nr_reclaimed);
+		count_memcg_events(lruvec_memcg(lruvec), PGSTEAL_DIRECT,
+				   nr_reclaimed);
 	}
 
 	putback_inactive_pages(lruvec, &page_list);
@@ -1927,8 +1939,11 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
 		}
 	}
 
-	if (!is_active_lru(lru))
+	if (!is_active_lru(lru)) {
 		__count_vm_events(PGDEACTIVATE, nr_moved);
+		count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
+				   nr_moved);
+	}
 
 	return nr_moved;
 }
@@ -1966,6 +1981,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
 	__count_vm_events(PGREFILL, nr_scanned);
+	count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
 	spin_unlock_irq(&pgdat->lru_lock);
...