Commit 7f4b35d1 authored by Tejun Heo, committed by Jens Axboe

block: allocate io_context upfront

The block layer allocates ioc very lazily.  It waits until the moment
ioc is absolutely necessary; unfortunately, that moment can fall inside
the queue lock, forcing __get_request() to perform an
unlock - try alloc - retry dance.

Just allocate it up-front on entry to the block layer.  We're not saving
the rain forest by deferring it to the last possible moment and
complicating things unnecessarily.
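
The user-space sketch below is purely illustrative and is not the kernel
code: a pthread mutex stands in for queue_lock, and the names ctx_alloc(),
get_request_lazy() and get_request_upfront() are invented for the sketch.
It only models why lazy allocation forces an unlock/retry cycle and why
allocating before taking the lock keeps the locked section linear.

/* build: cc -pthread sketch.c -o sketch -- hypothetical model, not kernel code */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static void *io_context;                        /* stands in for current->io_context */

static void *ctx_alloc(void)                    /* may sleep, so must not hold queue_lock */
{
        return malloc(64);
}

/* Old scheme: allocate lazily, which forces dropping the lock mid-path. */
static void get_request_lazy(void)
{
        int retried = 0;

        pthread_mutex_lock(&queue_lock);
retry:
        if (!io_context && !retried) {
                pthread_mutex_unlock(&queue_lock);      /* drop the lock to allocate */
                io_context = ctx_alloc();
                pthread_mutex_lock(&queue_lock);        /* re-acquire ...            */
                retried = 1;
                goto retry;                             /* ... and re-check the state */
        }
        /* ... allocate the request under the lock ... */
        pthread_mutex_unlock(&queue_lock);
}

/* New scheme: allocate before taking the lock; no unlock/retry inside. */
static void get_request_upfront(void)
{
        if (!io_context)
                io_context = ctx_alloc();               /* up-front, outside the lock */
        pthread_mutex_lock(&queue_lock);
        /* ... allocate the request; the locked section stays linear ... */
        pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
        get_request_lazy();
        get_request_upfront();
        printf("io_context allocated: %s\n", io_context ? "yes" : "no");
        free(io_context);
        return 0;
}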

This patch prepares for further updates to the request allocation
path.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent a06e05e6
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -855,15 +855,11 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 {
        struct request *rq;
        struct request_list *rl = &q->rq;
-       struct elevator_type *et;
-       struct io_context *ioc;
+       struct elevator_type *et = q->elevator->type;
+       struct io_context *ioc = rq_ioc(bio);
        struct io_cq *icq = NULL;
        const bool is_sync = rw_is_sync(rw_flags) != 0;
-       bool retried = false;
        int may_queue;
-retry:
-       et = q->elevator->type;
-       ioc = rq_ioc(bio);
 
        if (unlikely(blk_queue_dead(q)))
                return NULL;
@@ -874,20 +870,6 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
        if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
                if (rl->count[is_sync]+1 >= q->nr_requests) {
-                       /*
-                        * We want ioc to record batching state.  If it's
-                        * not already there, creating a new one requires
-                        * dropping queue_lock, which in turn requires
-                        * retesting conditions to avoid queue hang.
-                        */
-                       if (!ioc && !retried) {
-                               spin_unlock_irq(q->queue_lock);
-                               create_io_context(gfp_mask, q->node);
-                               spin_lock_irq(q->queue_lock);
-                               retried = true;
-                               goto retry;
-                       }
-
                        /*
                         * The queue will fill after this allocation, so set
                         * it as full, and mark this process as "batching".
@@ -955,12 +937,8 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
        /* init elvpriv */
        if (rw_flags & REQ_ELVPRIV) {
                if (unlikely(et->icq_cache && !icq)) {
-                       create_io_context(gfp_mask, q->node);
-                       ioc = rq_ioc(bio);
-                       if (!ioc)
-                               goto fail_elvpriv;
-
-                       icq = ioc_create_icq(ioc, q, gfp_mask);
+                       if (ioc)
+                               icq = ioc_create_icq(ioc, q, gfp_mask);
                        if (!icq)
                                goto fail_elvpriv;
                }
@@ -1071,7 +1049,6 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
         * to allocate at least one request, and up to a big batch of them
         * for a small period time.  See ioc_batching, ioc_set_batching
         */
-       create_io_context(GFP_NOIO, q->node);
        ioc_set_batching(q, current->io_context);
 
        spin_lock_irq(q->queue_lock);
@@ -1086,6 +1063,9 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
        BUG_ON(rw != READ && rw != WRITE);
 
+       /* create ioc upfront */
+       create_io_context(gfp_mask, q->node);
+
        spin_lock_irq(q->queue_lock);
        rq = get_request(q, rw, NULL, gfp_mask);
        if (!rq)
@@ -1698,6 +1678,14 @@ generic_make_request_checks(struct bio *bio)
                goto end_io;
        }
 
+       /*
+        * Various block parts want %current->io_context and lazy ioc
+        * allocation ends up trading a lot of pain for a small amount of
+        * memory.  Just allocate it upfront.  This may fail and block
+        * layer knows how to live with it.
+        */
+       create_io_context(GFP_ATOMIC, q->node);
+
        if (blk_throtl_bio(q, bio))
                return false;   /* throttled, will be resubmitted later */
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1123,9 +1123,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
                goto out;
        }
 
-       /* bio_associate_current() needs ioc, try creating */
-       create_io_context(GFP_ATOMIC, q->node);
-
        /*
         * A throtl_grp pointer retrieved under rcu can be used to access
         * basic fields like stats and io rates.  If a group has no rules,