Commit cfb17508 authored by Dave Chinner, committed by Jialin Zhang

xfs: bound maximum wait time for inodegc work

mainline inclusion
from mainline-v5.19-rc2
commit 7cf2b0f9
category: bugfix
bugzilla: 187526,https://gitee.com/openeuler/kernel/issues/I6WKVJ

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=7cf2b0f9611b9971d663e1fc3206eeda3b902922

--------------------------------

Currently inodegc work can sit queued on the per-cpu queue until
the workqueue is either flushed or the queue reaches a depth that
triggers work queuing (and later throttling). This means that we
could queue work that then waits for a long time for some other
event to trigger flushing.

Hence, instead of only queueing work at a specific depth, use a
delayed work item that queues the work after a bound time. We can
still schedule the work immediately at a given depth, but we no
longer need to worry about leaving a number of items on the list
that won't get processed until external events prevail.
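
To illustrate the pattern (this sketch is not part of the patch;
example_gc, example_queue and GC_DEPTH_THRESHOLD are made-up names
standing in for their xfs_inodegc equivalents), a bounded-delay
producer looks roughly like this:

    #include <linux/llist.h>
    #include <linux/workqueue.h>

    #define GC_DEPTH_THRESHOLD	32	/* made-up immediate-dispatch depth */

    struct example_gc {
    	struct llist_head	list;	/* lock-free list of queued items */
    	struct delayed_work	work;	/* replaces a plain work_struct */
    	unsigned int		items;	/* approximate queue depth */
    };

    static void example_queue(struct example_gc *gc, struct llist_node *item)
    {
    	unsigned long delay = 1;	/* bound the wait to one timer tick */

    	llist_add(item, &gc->list);
    	WRITE_ONCE(gc->items, READ_ONCE(gc->items) + 1);

    	/* A deep queue is still dispatched immediately. */
    	if (READ_ONCE(gc->items) > GC_DEPTH_THRESHOLD)
    		delay = 0;

    	/*
    	 * mod_delayed_work() queues the work if it is idle, or re-arms
    	 * the pending timer, so queued items never wait solely on an
    	 * external flush.
    	 */
    	mod_delayed_work(system_unbound_wq, &gc->work, delay);
    }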
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Guo Xuenan <guoxuenan@huawei.com>
Reviewed-by: Yang Erkun <yangerkun@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
Parent cc10c7d9
fs/xfs/xfs_icache.c
@@ -447,7 +447,7 @@ xfs_inodegc_queue_all(
 	for_each_online_cpu(cpu) {
 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
 		if (!llist_empty(&gc->list))
-			queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
+			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
 	}
 }
@@ -1874,8 +1874,8 @@ void
 xfs_inodegc_worker(
 	struct work_struct	*work)
 {
-	struct xfs_inodegc	*gc = container_of(work, struct xfs_inodegc,
-							work);
+	struct xfs_inodegc	*gc = container_of(to_delayed_work(work),
+						struct xfs_inodegc, work);
 	struct llist_node	*node = llist_del_all(&gc->list);
 	struct xfs_inode	*ip, *n;
 	unsigned int		nofs_flag;
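
The xfs_inodegc_worker() change above is needed because a delayed-work
handler still receives a plain work_struct pointer: it points at the
work member embedded inside the delayed_work. Continuing the made-up
example_gc sketch from the commit message, the handler recovers its
container like so:

    static void example_worker(struct work_struct *work)
    {
    	/* to_delayed_work() maps 'work' back to its enclosing delayed_work. */
    	struct example_gc *gc = container_of(to_delayed_work(work),
    					     struct example_gc, work);
    	struct llist_node *node = llist_del_all(&gc->list);

    	/* ... walk 'node' and process each queued item ... */
    }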
@@ -2064,6 +2064,7 @@ xfs_inodegc_queue(
 	struct xfs_inodegc	*gc;
 	int			items;
 	unsigned int		shrinker_hits;
+	unsigned long		queue_delay = 1;
 
 	trace_xfs_inode_set_need_inactive(ip);
 	spin_lock(&ip->i_flags_lock);
@@ -2075,19 +2076,26 @@ xfs_inodegc_queue(
 	items = READ_ONCE(gc->items);
 	WRITE_ONCE(gc->items, items + 1);
 	shrinker_hits = READ_ONCE(gc->shrinker_hits);
-	put_cpu_ptr(gc);
 
-	if (!xfs_is_inodegc_enabled(mp))
+	/*
+	 * We queue the work while holding the current CPU so that the work
+	 * is scheduled to run on this CPU.
+	 */
+	if (!xfs_is_inodegc_enabled(mp)) {
+		put_cpu_ptr(gc);
 		return;
+	}
 
-	if (xfs_inodegc_want_queue_work(ip, items)) {
-		trace_xfs_inodegc_queue(mp, __return_address);
-		queue_work(mp->m_inodegc_wq, &gc->work);
-	}
+	if (xfs_inodegc_want_queue_work(ip, items))
+		queue_delay = 0;
+
+	trace_xfs_inodegc_queue(mp, __return_address);
+	mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay);
+	put_cpu_ptr(gc);
 
 	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
 		trace_xfs_inodegc_throttle(mp, __return_address);
-		flush_work(&gc->work);
+		flush_delayed_work(&gc->work);
 	}
 }
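
Two details of this hunk matter. First, put_cpu_ptr() now follows
mod_delayed_work(), so the work is queued while execution is still
pinned to the local CPU and a per-cpu workqueue will run it on the
CPU whose llist it drains. Second, the throttle path must switch to
flush_delayed_work(), because flush_work() alone would not wait for
an instance whose timer has not yet fired. A rough sketch with the
made-up example_gc (the flush is unconditional here for brevity,
conditional in the real code):

    #include <linux/percpu.h>

    static void example_queue_pinned(struct example_gc __percpu *pcp,
    				 struct workqueue_struct *wq)
    {
    	struct example_gc *gc = get_cpu_ptr(pcp);	/* disables preemption */

    	/* Queue while still pinned so a per-cpu workqueue runs it here. */
    	mod_delayed_work(wq, &gc->work, 0);
    	put_cpu_ptr(pcp);				/* re-enables preemption */

    	/* Throttle path: kick a still-pending timer, then wait. */
    	flush_delayed_work(&gc->work);
    }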
@@ -2104,7 +2112,7 @@ xfs_inodegc_cpu_dead(
 	unsigned int		count = 0;
 
 	dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
-	cancel_work_sync(&dead_gc->work);
+	cancel_delayed_work_sync(&dead_gc->work);
 
 	if (llist_empty(&dead_gc->list))
 		return;
@@ -2123,12 +2131,12 @@ xfs_inodegc_cpu_dead(
 	llist_add_batch(first, last, &gc->list);
 	count += READ_ONCE(gc->items);
 	WRITE_ONCE(gc->items, count);
-	put_cpu_ptr(gc);
 
 	if (xfs_is_inodegc_enabled(mp)) {
 		trace_xfs_inodegc_queue(mp, __return_address);
-		queue_work(mp->m_inodegc_wq, &gc->work);
+		mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
 	}
+	put_cpu_ptr(gc);
 }
 
 /*
@@ -2223,7 +2231,7 @@ xfs_inodegc_shrinker_scan(
 			unsigned int	h = READ_ONCE(gc->shrinker_hits);
 
 			WRITE_ONCE(gc->shrinker_hits, h + 1);
-			queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
+			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
 			no_items = false;
 		}
 	}
fs/xfs/xfs_mount.h
@@ -60,7 +60,7 @@ struct xfs_error_cfg {
  */
 struct xfs_inodegc {
 	struct llist_head	list;
-	struct work_struct	work;
+	struct delayed_work	work;
 
 	/* approximate count of inodes in the list */
 	unsigned int		items;
fs/xfs/xfs_super.c
@@ -1104,7 +1104,7 @@ xfs_inodegc_init_percpu(
 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
 		init_llist_head(&gc->list);
 		gc->items = 0;
-		INIT_WORK(&gc->work, xfs_inodegc_worker);
+		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
 	}
 	return 0;
 }
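
For completeness, initialization and teardown of the made-up
example_gc follow the same substitutions as the patch
(INIT_DELAYED_WORK for INIT_WORK, cancel_delayed_work_sync for
cancel_work_sync):

    static void example_init(struct example_gc *gc)
    {
    	init_llist_head(&gc->list);
    	gc->items = 0;
    	/* Bind example_worker as the handler; no timer is armed yet. */
    	INIT_DELAYED_WORK(&gc->work, example_worker);
    }

    static void example_teardown(struct example_gc *gc)
    {
    	/* Stop a pending timer and wait out a running handler. */
    	cancel_delayed_work_sync(&gc->work);
    }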