Commit 703c2708 authored by Tejun Heo, committed by Jens Axboe

writeback: implement and use inode_congested()

In several places, bdi_congested() and its wrappers are used to
determine whether more IOs should be issued.  With cgroup writeback
support, this question can't be answered solely based on the bdi
(backing_dev_info).  It depends on whether the filesystem and bdi
support cgroup writeback, and on the blkcg the inode is associated with.

This patch implements inode_congested() and its wrappers, which take
@inode and determine the congestion state while taking cgroup writeback
into account.  The new functions replace bdi_*congested() calls in
places where the query is about a specific inode and task.
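
As a minimal illustration of that conversion (not part of this patch's
diff; it simply mirrors the call-site changes below, assuming @mapping
is the address_space being written back):

	/* before: congestion is a property of the whole bdi */
	if (bdi_write_congested(inode_to_bdi(mapping->host)))
		return;

	/* after: congestion is queried per inode and, with cgroup
	 * writeback enabled, reflects the wb of the blkcg the inode
	 * belongs to
	 */
	if (inode_write_congested(mapping->host))
		return;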

There are several filesystem users which also fit these criteria, but
they should be updated when each filesystem implements cgroup writeback
support.

v2: Now that a given inode is associated with only one wb, the
    congestion state can be determined independently of the asking
    task.  Drop @task.  Spotted by Vivek.  Also, converted to take
    @inode instead of @mapping and renamed to inode_congested().
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Parent 482cf79c
@@ -142,6 +142,35 @@ static void __wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
 	wb_queue_work(wb, work);
 }
 
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+/**
+ * inode_congested - test whether an inode is congested
+ * @inode: inode to test for congestion
+ * @cong_bits: mask of WB_[a]sync_congested bits to test
+ *
+ * Tests whether @inode is congested. @cong_bits is the mask of congestion
+ * bits to test and the return value is the mask of set bits.
+ *
+ * If cgroup writeback is enabled for @inode, the congestion state is
+ * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
+ * associated with @inode is congested; otherwise, the root wb's congestion
+ * state is used.
+ */
+int inode_congested(struct inode *inode, int cong_bits)
+{
+	if (inode) {
+		struct bdi_writeback *wb = inode_to_wb(inode);
+		if (wb)
+			return wb_congested(wb, cong_bits);
+	}
+
+	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
+}
+EXPORT_SYMBOL_GPL(inode_congested);
+
+#endif	/* CONFIG_CGROUP_WRITEBACK */
+
 /**
  * bdi_start_writeback - start writeback
  * @bdi: the backing device to write from
......
@@ -230,6 +230,7 @@ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
 void __inode_attach_wb(struct inode *inode, struct page *page);
 void wb_memcg_offline(struct mem_cgroup *memcg);
 void wb_blkcg_offline(struct blkcg *blkcg);
+int inode_congested(struct inode *inode, int cong_bits);
 
 /**
  * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
@@ -438,8 +439,29 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg)
 {
 }
 
+static inline int inode_congested(struct inode *inode, int cong_bits)
+{
+	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
+}
+
 #endif	/* CONFIG_CGROUP_WRITEBACK */
 
+static inline int inode_read_congested(struct inode *inode)
+{
+	return inode_congested(inode, 1 << WB_sync_congested);
+}
+
+static inline int inode_write_congested(struct inode *inode)
+{
+	return inode_congested(inode, 1 << WB_async_congested);
+}
+
+static inline int inode_rw_congested(struct inode *inode)
+{
+	return inode_congested(inode, (1 << WB_sync_congested) |
+				      (1 << WB_async_congested));
+}
+
 static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
 {
 	return wb_congested(&bdi->wb, cong_bits);
......
@@ -115,7 +115,7 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
 	case POSIX_FADV_NOREUSE:
 		break;
 	case POSIX_FADV_DONTNEED:
-		if (!bdi_write_congested(bdi))
+		if (!inode_write_congested(mapping->host))
 			__filemap_fdatawrite_range(mapping, offset, endbyte,
 						   WB_SYNC_NONE);
......
@@ -541,7 +541,7 @@ page_cache_async_readahead(struct address_space *mapping,
 	/*
 	 * Defer asynchronous read-ahead on IO congestion.
	 */
-	if (bdi_read_congested(inode_to_bdi(mapping->host)))
+	if (inode_read_congested(mapping->host))
		return;
 
	/* do read-ahead */
......
@@ -452,14 +452,13 @@ static inline int is_page_cache_freeable(struct page *page)
 	return page_count(page) - page_has_private(page) == 2;
 }
 
-static int may_write_to_queue(struct backing_dev_info *bdi,
-			      struct scan_control *sc)
+static int may_write_to_inode(struct inode *inode, struct scan_control *sc)
 {
 	if (current->flags & PF_SWAPWRITE)
 		return 1;
-	if (!bdi_write_congested(bdi))
+	if (!inode_write_congested(inode))
 		return 1;
-	if (bdi == current->backing_dev_info)
+	if (inode_to_bdi(inode) == current->backing_dev_info)
 		return 1;
 	return 0;
 }
@@ -538,7 +537,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
 	}
 	if (mapping->a_ops->writepage == NULL)
 		return PAGE_ACTIVATE;
-	if (!may_write_to_queue(inode_to_bdi(mapping->host), sc))
+	if (!may_write_to_inode(mapping->host, sc))
 		return PAGE_KEEP;
 
 	if (clear_page_dirty_for_io(page)) {
@@ -924,7 +923,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		mapping = page_mapping(page);
 		if (((dirty || writeback) && mapping &&
-		     bdi_write_congested(inode_to_bdi(mapping->host))) ||
+		     inode_write_congested(mapping->host)) ||
 		    (writeback && PageReclaim(page)))
 			nr_congested++;
......