Commit 73f0d49a authored by Tejun Heo

blk-throttle: move bio_lists[] and friends to throtl_service_queue

throtl_service_queues will eventually form a tree which is anchored at
throtl_data->service_queue and queue bios will climb the tree to the
top service_queue to be executed.

This patch moves bio_lists[] and nr_queued[] from throtl_grp to its
service_queue to prepare for that.  As currently only the
throtl_data->service_queue is in use, this patch just ends up moving
throtl_grp->bio_lists[] and ->nr_queued[] to
throtl_grp->service_queue.bio_lists[] and ->nr_queued[] without making
any functional differences.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
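
For reference, the net effect on the two structures can be read straight off the hunks below. A condensed view of the resulting layout (fields unrelated to this change elided):

	struct throtl_service_queue {
		struct bio_list		bio_lists[2];	/* queued bios [READ/WRITE] */
		unsigned int		nr_queued[2];	/* number of queued bios */
		struct rb_root		pending_tree;	/* RB tree of active tgs */
		struct rb_node		*first_pending;	/* first node in the tree */
		unsigned int		nr_pending;	/* # queued in the tree */
	};

	struct throtl_grp {
		/* ... */
		struct throtl_service_queue service_queue;	/* now owns bio_lists[]/nr_queued[] */
		/* bio_lists[2] and nr_queued[2] no longer live here */
		/* ... */
	};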
Parent 49a2f1e3
@@ -27,6 +27,17 @@ static struct blkcg_policy blkcg_policy_throtl;
 static struct workqueue_struct *kthrotld_workqueue;
 
 struct throtl_service_queue {
+	/*
+	 * Bios queued directly to this service_queue or dispatched from
+	 * children throtl_grp's.
+	 */
+	struct bio_list		bio_lists[2];	/* queued bios [READ/WRITE] */
+	unsigned int		nr_queued[2];	/* number of queued bios */
+
+	/*
+	 * RB tree of active children throtl_grp's, which are sorted by
+	 * their ->disptime.
+	 */
 	struct rb_root		pending_tree;	/* RB tree of active tgs */
 	struct rb_node		*first_pending;	/* first node in the tree */
 	unsigned int		nr_pending;	/* # queued in the tree */
@@ -69,12 +80,6 @@ struct throtl_grp {
 	unsigned int flags;
 
-	/* Two lists for READ and WRITE */
-	struct bio_list bio_lists[2];
-
-	/* Number of queued bios on READ and WRITE lists */
-	unsigned int nr_queued[2];
-
 	/* bytes per second rate limits */
 	uint64_t bps[2];
@@ -193,6 +198,8 @@ static void tg_stats_alloc_fn(struct work_struct *work)
 /* init a service_queue, assumes the caller zeroed it */
 static void throtl_service_queue_init(struct throtl_service_queue *sq)
 {
+	bio_list_init(&sq->bio_lists[0]);
+	bio_list_init(&sq->bio_lists[1]);
 	sq->pending_tree = RB_ROOT;
 }
@@ -204,8 +211,6 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	throtl_service_queue_init(&tg->service_queue);
 	RB_CLEAR_NODE(&tg->rb_node);
 	tg->td = blkg->q->td;
-	bio_list_init(&tg->bio_lists[0]);
-	bio_list_init(&tg->bio_lists[1]);
 
 	tg->bps[READ] = -1;
 	tg->bps[WRITE] = -1;
@@ -624,7 +629,8 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 	 * this function with a different bio if there are other bios
 	 * queued.
 	 */
-	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
+	BUG_ON(tg->service_queue.nr_queued[rw] &&
+	       bio != bio_list_peek(&tg->service_queue.bio_lists[rw]));
 
 	/* If tg->bps = -1, then BW is unlimited */
 	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
@@ -703,12 +709,13 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 static void throtl_add_bio_tg(struct bio *bio, struct throtl_grp *tg,
 			      struct throtl_service_queue *parent_sq)
 {
+	struct throtl_service_queue *sq = &tg->service_queue;
 	bool rw = bio_data_dir(bio);
 
-	bio_list_add(&tg->bio_lists[rw], bio);
+	bio_list_add(&sq->bio_lists[rw], bio);
 	/* Take a bio reference on tg */
 	blkg_get(tg_to_blkg(tg));
-	tg->nr_queued[rw]++;
+	sq->nr_queued[rw]++;
 	tg->td->nr_queued[rw]++;
 	throtl_enqueue_tg(tg, parent_sq);
 }
@@ -716,13 +723,14 @@ static void throtl_add_bio_tg(struct bio *bio, struct throtl_grp *tg,
 static void tg_update_disptime(struct throtl_grp *tg,
 			       struct throtl_service_queue *parent_sq)
 {
+	struct throtl_service_queue *sq = &tg->service_queue;
 	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
 	struct bio *bio;
 
-	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
+	if ((bio = bio_list_peek(&sq->bio_lists[READ])))
 		tg_may_dispatch(tg, bio, &read_wait);
 
-	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
+	if ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
 		tg_may_dispatch(tg, bio, &write_wait);
 
 	min_wait = min(read_wait, write_wait);
@@ -737,10 +745,11 @@ static void tg_update_disptime(struct throtl_grp *tg,
 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw,
 				struct bio_list *bl)
 {
+	struct throtl_service_queue *sq = &tg->service_queue;
 	struct bio *bio;
 
-	bio = bio_list_pop(&tg->bio_lists[rw]);
-	tg->nr_queued[rw]--;
+	bio = bio_list_pop(&sq->bio_lists[rw]);
+	sq->nr_queued[rw]--;
 	/* Drop bio reference on blkg */
 	blkg_put(tg_to_blkg(tg));
@@ -756,6 +765,7 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw,
 static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl)
 {
+	struct throtl_service_queue *sq = &tg->service_queue;
 	unsigned int nr_reads = 0, nr_writes = 0;
 	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
 	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
@@ -763,7 +773,7 @@ static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl)
 	/* Try to dispatch 75% READS and 25% WRITES */
 
-	while ((bio = bio_list_peek(&tg->bio_lists[READ])) &&
+	while ((bio = bio_list_peek(&sq->bio_lists[READ])) &&
 	       tg_may_dispatch(tg, bio, NULL)) {
 
 		tg_dispatch_one_bio(tg, bio_data_dir(bio), bl);
@@ -773,7 +783,7 @@ static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl)
 			break;
 	}
 
-	while ((bio = bio_list_peek(&tg->bio_lists[WRITE])) &&
+	while ((bio = bio_list_peek(&sq->bio_lists[WRITE])) &&
 	       tg_may_dispatch(tg, bio, NULL)) {
 
 		tg_dispatch_one_bio(tg, bio_data_dir(bio), bl);
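
As a worked example of the 75%/25% split enforced above: assuming throtl_grp_quantum keeps its default value of 8 (this patch does not change it), one dispatch round moves at most

	max_nr_reads  = 8 * 3 / 4 = 6 READ bios
	max_nr_writes = 8 - 6     = 2 WRITE bios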
@@ -790,10 +800,10 @@ static int throtl_select_dispatch(struct throtl_service_queue *parent_sq,
 				  struct bio_list *bl)
 {
 	unsigned int nr_disp = 0;
-	struct throtl_grp *tg;
 
 	while (1) {
-		tg = throtl_rb_first(parent_sq);
+		struct throtl_grp *tg = throtl_rb_first(parent_sq);
+		struct throtl_service_queue *sq = &tg->service_queue;
 
 		if (!tg)
 			break;
@@ -805,7 +815,7 @@ static int throtl_select_dispatch(struct throtl_service_queue *parent_sq,
 		nr_disp += throtl_dispatch_tg(tg, bl);
 
-		if (tg->nr_queued[0] || tg->nr_queued[1])
+		if (sq->nr_queued[0] || sq->nr_queued[1])
 			tg_update_disptime(tg, parent_sq);
 
 		if (nr_disp >= throtl_quantum)
@@ -1043,6 +1053,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 {
 	struct throtl_data *td = q->td;
 	struct throtl_grp *tg;
+	struct throtl_service_queue *sq;
 	bool rw = bio_data_dir(bio), update_disptime = true;
 	struct blkcg *blkcg;
 	bool throttled = false;
@@ -1077,7 +1088,9 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	if (unlikely(!tg))
 		goto out_unlock;
 
-	if (tg->nr_queued[rw]) {
+	sq = &tg->service_queue;
+
+	if (sq->nr_queued[rw]) {
 		/*
 		 * There is already another bio queued in same dir. No
 		 * need to update dispatch time.
@@ -1112,7 +1125,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 		   rw == READ ? 'R' : 'W',
 		   tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
 		   tg->io_disp[rw], tg->iops[rw],
-		   tg->nr_queued[READ], tg->nr_queued[WRITE]);
+		   sq->nr_queued[READ], sq->nr_queued[WRITE]);
 
 	bio_associate_current(bio);
 	throtl_add_bio_tg(bio, tg, &q->td->service_queue);
@@ -1151,11 +1164,13 @@ void blk_throtl_drain(struct request_queue *q)
 	bio_list_init(&bl);
 
 	while ((tg = throtl_rb_first(parent_sq))) {
+		struct throtl_service_queue *sq = &tg->service_queue;
+
 		throtl_dequeue_tg(tg, parent_sq);
 
-		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
+		while ((bio = bio_list_peek(&sq->bio_lists[READ])))
 			tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);
-		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
+		while ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
 			tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);
 	}
 	spin_unlock_irq(q->queue_lock);
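
To place the refactoring in context, the hierarchy the commit message anticipates could be pictured as follows. This is only an illustration of the stated plan, not code from this or any later patch:

	/*
	 *   throtl_data->service_queue        top-level sq; bios are issued to
	 *        ^                            the request_queue from here
	 *        |   queued bios climb the tree
	 *   throtl_grp->service_queue         per-group sq; blk_throtl_bio()
	 *     .bio_lists[READ/WRITE]          queues throttled bios here first
	 */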