Commit 203a3151 authored by Matthew Wilcox (Oracle)

mm/writeback: Add __folio_mark_dirty()

Turn __set_page_dirty() into a wrapper around __folio_mark_dirty().
Convert account_page_dirtied() into folio_account_dirtied() and account
the number of pages in the folio to support multi-page folios.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Parent: b5e84594
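For context, a sketch of the conversion idiom this patch leans on (example_folio_span() is illustrative, not part of the commit): page_folio() maps any page, head or tail, to its containing folio, and folio_nr_pages() reports how many base pages that folio spans.

/* Sketch only: the page-to-folio idiom used throughout this patch. */
static long example_folio_span(struct page *page)
{
	struct folio *folio = page_folio(page);	/* always resolves to the head */

	return folio_nr_pages(folio);	/* 1 for order-0, 2^order for larger folios */
}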
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
@@ -1615,10 +1615,9 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
 					     struct bdi_writeback *wb);
 
-static inline void mem_cgroup_track_foreign_dirty(struct page *page,
+static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
 					struct bdi_writeback *wb)
 {
-	struct folio *folio = page_folio(page);
 	if (mem_cgroup_disabled())
 		return;
 
@@ -1643,7 +1642,7 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
 {
 }
 
-static inline void mem_cgroup_track_foreign_dirty(struct page *page,
+static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
 					struct bdi_writeback *wb)
 {
 }
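The signature change above pushes the page-to-folio conversion out of the helper and into its callers. A minimal sketch of what a still page-based caller would look like after this patch (example_track_dirty() is hypothetical; only mem_cgroup_track_foreign_dirty() is real):

/* Hypothetical caller: convert once at the boundary, then stay in
 * folio terms for the rest of the call chain. */
static void example_track_dirty(struct page *page, struct bdi_writeback *wb)
{
	struct folio *folio = page_folio(page);

	mem_cgroup_track_foreign_dirty(folio, wb);
}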
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
@@ -772,8 +772,13 @@ void end_page_writeback(struct page *page);
 void folio_end_writeback(struct folio *folio);
 void wait_for_stable_page(struct page *page);
 void folio_wait_stable(struct folio *folio);
+void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
+static inline void __set_page_dirty(struct page *page,
+		struct address_space *mapping, int warn)
+{
+	__folio_mark_dirty(page_folio(page), mapping, warn);
+}
 
-void __set_page_dirty(struct page *, struct address_space *, int warn);
 int __set_page_dirty_nobuffers(struct page *page);
 int __set_page_dirty_no_writeback(struct page *page);
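This hunk is the compatibility-shim pattern the commit message describes: __set_page_dirty() survives as a static inline that converts and forwards, so unconverted filesystems keep compiling with no source change. A sketch of such a call site (example_fs_dirty() is hypothetical):

/* Hypothetical unconverted call site: works unchanged, because
 * __set_page_dirty() now inlines to
 * __folio_mark_dirty(page_folio(page), mapping, warn). */
static void example_fs_dirty(struct page *page, struct address_space *mapping)
{
	__set_page_dirty(page, mapping, 1);	/* warn == 1: complain if !uptodate */
}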
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
@@ -2438,29 +2438,30 @@ EXPORT_SYMBOL(__set_page_dirty_no_writeback);
  *
  * NOTE: This relies on being atomic wrt interrupts.
  */
-static void account_page_dirtied(struct page *page,
+static void folio_account_dirtied(struct folio *folio,
 		struct address_space *mapping)
 {
 	struct inode *inode = mapping->host;
 
-	trace_writeback_dirty_page(page, mapping);
+	trace_writeback_dirty_page(&folio->page, mapping);
 
 	if (mapping_can_writeback(mapping)) {
 		struct bdi_writeback *wb;
+		long nr = folio_nr_pages(folio);
 
-		inode_attach_wb(inode, page);
+		inode_attach_wb(inode, &folio->page);
 		wb = inode_to_wb(inode);
 
-		__inc_lruvec_page_state(page, NR_FILE_DIRTY);
-		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
-		__inc_node_page_state(page, NR_DIRTIED);
-		inc_wb_stat(wb, WB_RECLAIMABLE);
-		inc_wb_stat(wb, WB_DIRTIED);
-		task_io_account_write(PAGE_SIZE);
-		current->nr_dirtied++;
-		__this_cpu_inc(bdp_ratelimits);
+		__lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
+		__zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
+		__node_stat_mod_folio(folio, NR_DIRTIED, nr);
+		wb_stat_mod(wb, WB_RECLAIMABLE, nr);
+		wb_stat_mod(wb, WB_DIRTIED, nr);
+		task_io_account_write(nr * PAGE_SIZE);
+		current->nr_dirtied += nr;
+		__this_cpu_add(bdp_ratelimits, nr);
 
-		mem_cgroup_track_foreign_dirty(page, wb);
+		mem_cgroup_track_foreign_dirty(folio, wb);
 	}
 }
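The rewritten body above is where multi-page support actually lands: every statistic that used to be bumped by one is now adjusted by the folio's page count. A worked example, assuming PAGE_SIZE == 4096 and an order-2 (four-page) folio:

/*
 * Worked example (assumes PAGE_SIZE == 4096 and an order-2 folio):
 *
 *   nr = folio_nr_pages(folio)             -> 4
 *   __lruvec_stat_mod_folio(..., nr)       -> NR_FILE_DIRTY += 4
 *   task_io_account_write(nr * PAGE_SIZE)  -> 16384 bytes accounted
 *   current->nr_dirtied += nr              -> dirty throttling sees 4 pages
 *
 * The per-page version would have had to run once per subpage to reach
 * the same totals; the folio version charges them all in one call.
 */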
@@ -2481,24 +2482,24 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 }
 
 /*
- * Mark the page dirty, and set it dirty in the page cache, and mark the inode
- * dirty.
+ * Mark the folio dirty, and set it dirty in the page cache, and mark
+ * the inode dirty.
  *
- * If warn is true, then emit a warning if the page is not uptodate and has
+ * If warn is true, then emit a warning if the folio is not uptodate and has
  * not been truncated.
  *
  * The caller must hold lock_page_memcg().
  */
-void __set_page_dirty(struct page *page, struct address_space *mapping,
+void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
 			     int warn)
 {
 	unsigned long flags;
 
 	xa_lock_irqsave(&mapping->i_pages, flags);
-	if (page->mapping) {	/* Race with truncate? */
-		WARN_ON_ONCE(warn && !PageUptodate(page));
-		account_page_dirtied(page, mapping);
-		__xa_set_mark(&mapping->i_pages, page_index(page),
+	if (folio->mapping) {	/* Race with truncate? */
+		WARN_ON_ONCE(warn && !folio_test_uptodate(folio));
+		folio_account_dirtied(folio, mapping);
+		__xa_set_mark(&mapping->i_pages, folio_index(folio),
 				PAGECACHE_TAG_DIRTY);
 	}
 	xa_unlock_irqrestore(&mapping->i_pages, flags);
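The retained comment still requires callers to hold lock_page_memcg(). A minimal sketch of a caller honoring that rule, loosely modeled on __set_page_dirty_nobuffers() (example_mark_dirty() is hypothetical; the locking calls are the real kernel APIs of this era):

/* Hypothetical caller obeying the documented locking rule:
 * lock_page_memcg() is held across __folio_mark_dirty(), which then
 * takes the i_pages xarray lock itself (IRQ-safe) for the truncate
 * race check and the dirty-tag update. */
static void example_mark_dirty(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;

	lock_page_memcg(&folio->page);
	if (mapping)		/* NULL if truncated meanwhile */
		__folio_mark_dirty(folio, mapping, 1);
	unlock_page_memcg(&folio->page);
}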