提交 70d020ae 编写于 作者: L Lu Jialin 提交者: Zheng Zengkai

memcg: make memcg kswapd deal with dirty

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4IMAK?from=project-issue
CVE: NA

--------

The memcg kswapd can set the dirty state on a memcg when the current scan
finds that all scanned pages in the memcg are unqueued dirty. Kswapd will then write out the dirty pages.
Signed-off-by: Lu Jialin <lujialin4@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: weiyang wang <wangweiyang2@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
上级 1496d67c
...@@ -272,6 +272,10 @@ enum lruvec_flags { ...@@ -272,6 +272,10 @@ enum lruvec_flags {
LRUVEC_CONGESTED, /* lruvec has many dirty pages LRUVEC_CONGESTED, /* lruvec has many dirty pages
* backed by a congested BDI * backed by a congested BDI
*/ */
LRUVEC_DIRTY, /* reclaim scanning has recently found
* many dirty file pages at the tail of
* the LRU.
*/
}; };
struct lruvec { struct lruvec {
...@@ -595,10 +599,6 @@ struct zone { ...@@ -595,10 +599,6 @@ struct zone {
} ____cacheline_internodealigned_in_smp; } ____cacheline_internodealigned_in_smp;
enum pgdat_flags { enum pgdat_flags {
PGDAT_DIRTY, /* reclaim scanning has recently found
* many dirty file pages at the tail
* of the LRU.
*/
PGDAT_WRITEBACK, /* reclaim scanning has recently found PGDAT_WRITEBACK, /* reclaim scanning has recently found
* many pages under writeback * many pages under writeback
*/ */
......
...@@ -1289,6 +1289,7 @@ static unsigned int shrink_page_list(struct list_head *page_list, ...@@ -1289,6 +1289,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
LIST_HEAD(free_pages); LIST_HEAD(free_pages);
unsigned int nr_reclaimed = 0; unsigned int nr_reclaimed = 0;
unsigned int pgactivate = 0; unsigned int pgactivate = 0;
struct lruvec *target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
memset(stat, 0, sizeof(*stat)); memset(stat, 0, sizeof(*stat));
cond_resched(); cond_resched();
...@@ -1535,7 +1536,7 @@ static unsigned int shrink_page_list(struct list_head *page_list, ...@@ -1535,7 +1536,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
*/ */
if (page_is_file_lru(page) && if (page_is_file_lru(page) &&
(!current_is_kswapd() || !PageReclaim(page) || (!current_is_kswapd() || !PageReclaim(page) ||
!test_bit(PGDAT_DIRTY, &pgdat->flags))) { !test_bit(LRUVEC_DIRTY, &target_lruvec->flags))) {
/* /*
* Immediately reclaim when written back. * Immediately reclaim when written back.
* Similar in principal to deactivate_page() * Similar in principal to deactivate_page()
...@@ -3068,7 +3069,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) ...@@ -3068,7 +3069,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
/* Allow kswapd to start writing pages during reclaim.*/ /* Allow kswapd to start writing pages during reclaim.*/
if (sc->nr.unqueued_dirty == sc->nr.file_taken) if (sc->nr.unqueued_dirty == sc->nr.file_taken)
set_bit(PGDAT_DIRTY, &pgdat->flags); set_bit(LRUVEC_DIRTY, &target_lruvec->flags);
/* /*
* If kswapd scans pages marked for immediate * If kswapd scans pages marked for immediate
...@@ -3088,7 +3089,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) ...@@ -3088,7 +3089,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
* Legacy memcg will stall in page writeback so avoid forcibly * Legacy memcg will stall in page writeback so avoid forcibly
* stalling in wait_iff_congested(). * stalling in wait_iff_congested().
*/ */
if ((current_is_kswapd() || if (((current_is_kswapd() && !cgroup_reclaim(sc)) ||
(cgroup_reclaim(sc) && writeback_throttling_sane(sc))) && (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) &&
sc->nr.dirty && sc->nr.dirty == sc->nr.congested) sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
set_bit(LRUVEC_CONGESTED, &target_lruvec->flags); set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);
...@@ -3322,6 +3323,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, ...@@ -3322,6 +3323,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
zone->zone_pgdat); zone->zone_pgdat);
clear_bit(LRUVEC_CONGESTED, &lruvec->flags); clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
if (current_is_kswapd())
clear_bit(LRUVEC_DIRTY, &lruvec->flags);
} }
} }
...@@ -3712,7 +3715,7 @@ static void clear_pgdat_congested(pg_data_t *pgdat) ...@@ -3712,7 +3715,7 @@ static void clear_pgdat_congested(pg_data_t *pgdat)
struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
clear_bit(LRUVEC_CONGESTED, &lruvec->flags); clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
clear_bit(PGDAT_DIRTY, &pgdat->flags); clear_bit(LRUVEC_DIRTY, &lruvec->flags);
clear_bit(PGDAT_WRITEBACK, &pgdat->flags); clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册