Commit ff26eaad authored by Tejun Heo, committed by Jens Axboe

blkcg: tg_stats_alloc_lock is an irq lock

tg_stats_alloc_lock nests inside the queue lock and should always be held
with irqs disabled.  throtl_pd_{init|exit}() were using non-irqsafe
spinlock ops, which triggered an irq lock inversion warning when RCU
freeing of a blkg invoked throtl_pd_exit() without disabling irqs.

Update both functions to use irq-safe operations.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
LKML-Reference: <1335339396.16988.80.camel@lappy>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent 0b7877d4
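For context, the sketch below (hypothetical names; not code from this patch) illustrates the locking rule the fix enforces: a spinlock that is also taken from an RCU callback, which can run in softirq context, must be held with irqs disabled in every context. If process context held it with plain spin_lock() and the callback fired on the same CPU, the callback would spin forever on a lock its own CPU already holds; lockdep flags the inconsistent irq usage before that deadlock can actually occur.

#include <linux/spinlock.h>
#include <linux/rcupdate.h>

/* Hypothetical lock with the same usage pattern as tg_stats_alloc_lock:
 * taken from process context and from an RCU callback, which may run in
 * softirq context on the same CPU. */
static DEFINE_SPINLOCK(demo_lock);

/* Process-context path.  Plain spin_lock() would leave irqs enabled; if
 * a softirq then ran the RCU callback below on this CPU, the callback
 * would spin on demo_lock while its holder can never resume. */
static void demo_update(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* irqs off, prior state saved */
	/* ... modify the shared list ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* RCU-callback path, analogous to the RCU freeing of a blkg calling
 * throtl_pd_exit().  Taking the lock with the irqsave variant here too
 * keeps its irq usage consistent, which is what lockdep verifies. */
static void demo_free_cb(struct rcu_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... unlink the element being freed ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

The irqsave/irqrestore variants are used rather than spin_lock_irq()/spin_unlock_irq() because, as the message notes, tg_stats_alloc_lock nests inside the queue lock, so a caller may already have irqs disabled; saving and restoring the flags avoids re-enabling irqs on unlock. The patch below applies exactly this change to throtl_pd_init() and throtl_pd_exit().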
@@ -219,6 +219,7 @@ static void tg_stats_alloc_fn(struct work_struct *work)
 static void throtl_pd_init(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
+	unsigned long flags;
 
 	RB_CLEAR_NODE(&tg->rb_node);
 	bio_list_init(&tg->bio_lists[0]);
@@ -235,19 +236,20 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	 * but percpu allocator can't be called from IO path.  Queue tg on
 	 * tg_stats_alloc_list and allocate from work item.
 	 */
-	spin_lock(&tg_stats_alloc_lock);
+	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
 	queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
-	spin_unlock(&tg_stats_alloc_lock);
+	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
 static void throtl_pd_exit(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
+	unsigned long flags;
 
-	spin_lock(&tg_stats_alloc_lock);
+	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 	list_del_init(&tg->stats_alloc_node);
-	spin_unlock(&tg_stats_alloc_lock);
+	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 
 	free_percpu(tg->stats_cpu);
 }