Commit 08668f83 authored by Yu Kuai, committed by Yang Yingliang

blk-iocost: fix spin_lock not released in sq

hulk inclusion
category: feature
bugzilla: 38688
CVE: NA

---------------------------

In sq, ioc_rqos_throttle() is called with 'q->queue_lock' held via
spin_lock_irq(). However, ioc_rqos_throttle() calls
spin_lock_irq(&ioc->lock) and spin_unlock_irq(&ioc->lock) before
'q->queue_lock' is released. Since spin_unlock_irq() re-enables local
irqs unconditionally, irqs end up enabled once 'ioc->lock' is released
while 'q->queue_lock' is still held, and 'q->queue_lock' might never
be released.

spin_lock_irq(q->queue_lock)        --> local irq will be disabled
    rq_qos_throttle
        spin_lock_irq(&ioc->lock)
        spin_unlock_irq(&ioc->lock) --> local irq will be enabled before
                                        'q->queue_lock' is released
        spin_unlock_irq(q->queue_lock)
        io_schedule()
        spin_lock_irq(q->queue_lock)
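The trace comes down to spin_unlock_irq() re-enabling local irqs
unconditionally instead of restoring their previous state. A minimal
sketch of the broken nesting, using the illustrative names
'outer_lock' and 'inner_lock' (not the actual kernel code) as
stand-ins for 'q->queue_lock' and 'ioc->lock':

    spin_lock_irq(&outer_lock);      /* local irqs disabled */

    spin_lock_irq(&inner_lock);      /* irqs already off */
    spin_unlock_irq(&inner_lock);    /* unconditionally re-enables irqs */

    /*
     * outer_lock is still held, but local irqs are back on: the
     * outer irq-disabled critical section has silently been broken.
     */
    spin_unlock_irq(&outer_lock);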

Fix the problem by using spin_lock_irqsave()/spin_unlock_irqrestore()
for the inner locks ('ioc->lock' and 'iocg->waitq.lock'), so that the
irq-disabled state established for 'q->queue_lock' is preserved.
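The same nesting with the state-saving variants, again with
illustrative lock names: spin_lock_irqsave() records the current irq
state in 'flags' and spin_unlock_irqrestore() puts exactly that state
back, so irqs stay disabled until the outer unlock:

    unsigned long flags;

    spin_lock_irq(&outer_lock);                 /* local irqs disabled */

    spin_lock_irqsave(&inner_lock, flags);      /* flags records: irqs off */
    spin_unlock_irqrestore(&inner_lock, flags); /* restores: irqs stay off */

    /* the outer critical section still runs with irqs disabled */
    spin_unlock_irq(&outer_lock);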
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent ceb9b8c3
@@ -1030,6 +1030,7 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
 	u64 last_period, cur_period, max_period_delta;
 	u64 vtime, vmargin, vmin;
 	int i;
+	unsigned long flags;
 
 	/*
 	 * If seem to be already active, just update the stamp to tell the
@@ -1047,7 +1048,7 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
 	if (iocg->child_active_sum)
 		return false;
 
-	spin_lock_irq(&ioc->lock);
+	spin_lock_irqsave(&ioc->lock, flags);
 
 	ioc_now(ioc, now);
@@ -1105,11 +1106,11 @@ static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
 	}
 
 succeed_unlock:
-	spin_unlock_irq(&ioc->lock);
+	spin_unlock_irqrestore(&ioc->lock, flags);
 	return true;
 
 fail_unlock:
-	spin_unlock_irq(&ioc->lock);
+	spin_unlock_irqrestore(&ioc->lock, flags);
 	return false;
 }
@@ -1690,6 +1691,7 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio,
 	struct iocg_wait wait;
 	u32 hw_active, hw_inuse;
 	u64 abs_cost, cost, vtime;
+	unsigned long flags;
 
 	rcu_read_lock();
 	blkcg = bio_blkcg(bio);
@@ -1736,9 +1738,9 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio,
 	    time_after_eq64(vtime + ioc->inuse_margin_vtime, now.vnow)) {
 		TRACE_IOCG_PATH(inuse_reset, iocg, &now,
 				iocg->inuse, iocg->weight, hw_inuse, hw_active);
-		spin_lock_irq(&ioc->lock);
+		spin_lock_irqsave(&ioc->lock, flags);
 		propagate_active_weight(iocg, iocg->weight, iocg->weight);
-		spin_unlock_irq(&ioc->lock);
+		spin_unlock_irqrestore(&ioc->lock, flags);
 		current_hweight(iocg, &hw_active, &hw_inuse);
 	}
@@ -1787,7 +1789,7 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio,
 	 * All waiters are on iocg->waitq and the wait states are
 	 * synchronized using waitq.lock.
 	 */
-	spin_lock_irq(&iocg->waitq.lock);
+	spin_lock_irqsave(&iocg->waitq.lock, flags);
 
 	/*
 	 * We activated above but w/o any synchronization. Deactivation is
@@ -1796,7 +1798,7 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio,
 	 * In the unlikely case that we are deactivated, just issue the IO.
 	 */
 	if (unlikely(list_empty(&iocg->active_list))) {
-		spin_unlock_irq(&iocg->waitq.lock);
+		spin_unlock_irqrestore(&iocg->waitq.lock, flags);
 		iocg_commit_bio(iocg, bio, cost);
 		return;
 	}
@@ -1810,7 +1812,7 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio,
 	__add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
 	iocg_kick_waitq(iocg, &now);
 
-	spin_unlock_irq(&iocg->waitq.lock);
+	spin_unlock_irqrestore(&iocg->waitq.lock, flags);
 
 	while (true) {
 		set_current_state(TASK_UNINTERRUPTIBLE);