Commit 0a7ae2ff authored by Jens Axboe

block: change the tag sync vs async restriction logic

Make them fully share the tag space, but disallow async requests from
using the last two slots.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Parent ac36552a
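The policy introduced here can be sketched outside the kernel: sync requests may use the full tag depth, while async requests are confined to all but the last two slots (clamped to at least one) and are also refused once their in-flight count exceeds that reduced depth. The standalone C sketch below mirrors that logic under stated assumptions; the structure, function names, and DEMO_DEPTH value are illustrative stand-ins, not the kernel's own types, and in-flight accounting is simplified to the tag-allocation step.

```c
#include <stdbool.h>
#include <stdio.h>

#define DEMO_DEPTH 8  /* illustrative tag-map size, not a kernel value */

/* Illustrative stand-ins for the blk_queue_tag / request_queue fields. */
struct demo_tags {
	bool tag_map[DEMO_DEPTH];   /* true = tag in use        */
	unsigned int max_depth;     /* bqt->max_depth           */
	unsigned int in_flight[2];  /* [0] = async, [1] = sync  */
};

/*
 * Try to allocate a tag, mirroring the commit's policy: sync and async
 * share one tag space, but an async request may not use the last two
 * slots (depth clamped to at least 1) and is refused while the async
 * in-flight count already exceeds that reduced depth.
 * Returns the tag on success, -1 on failure.
 */
static int demo_start_tag(struct demo_tags *t, bool is_sync)
{
	unsigned int max_depth = t->max_depth;
	unsigned int tag;

	if (!is_sync && max_depth > 1) {
		max_depth -= 2;
		if (!max_depth)
			max_depth = 1;
		if (t->in_flight[0] > max_depth)
			return -1;
	}

	/* Stand-in for find_first_zero_bit(bqt->tag_map, max_depth). */
	for (tag = 0; tag < max_depth; tag++) {
		if (!t->tag_map[tag]) {
			t->tag_map[tag] = true;
			t->in_flight[is_sync]++;
			return (int)tag;
		}
	}
	return -1;
}

int main(void)
{
	struct demo_tags t = { .max_depth = DEMO_DEPTH };

	/* With a depth of 8, async callers can reach tags 0..5 only,
	 * while sync callers may take any of the 8 tags. */
	printf("async tag: %d\n", demo_start_tag(&t, false));
	printf("sync tag:  %d\n", demo_start_tag(&t, true));
	return 0;
}
```

Compared with the previous scheme (async tags starting at a fixed offset of max_depth >> 2), this lets both classes draw from the whole tag space while still reserving a small headroom for sync I/O.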
@@ -218,7 +218,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	} else
 		skip |= QUEUE_ORDSEQ_PREFLUSH;
 
-	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
+	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
 		rq = NULL;
 	else
 		skip |= QUEUE_ORDSEQ_DRAIN;
......
@@ -1815,7 +1815,7 @@ void blk_dequeue_request(struct request *rq)
 	 * the driver side.
 	 */
	if (blk_account_rq(rq))
-		q->in_flight++;
+		q->in_flight[rq_is_sync(rq)]++;
 }
 
 /**
......
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
 int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
-	unsigned max_depth, offset;
+	unsigned max_depth;
 	int tag;
 
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -355,13 +355,16 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	 * to starve sync IO on behalf of flooding async IO.
 	 */
 	max_depth = bqt->max_depth;
-	if (rq_is_sync(rq))
-		offset = 0;
-	else
-		offset = max_depth >> 2;
+	if (!rq_is_sync(rq) && max_depth > 1) {
+		max_depth -= 2;
+		if (!max_depth)
+			max_depth = 1;
+		if (q->in_flight[0] > max_depth)
+			return 1;
+	}
 
 	do {
-		tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
+		tag = find_first_zero_bit(bqt->tag_map, max_depth);
 		if (tag >= max_depth)
 			return 1;
......
@@ -546,7 +546,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
 	 * in_flight count again
 	 */
 	if (blk_account_rq(rq)) {
-		q->in_flight--;
+		q->in_flight[rq_is_sync(rq)]--;
 		if (blk_sorted_rq(rq))
 			elv_deactivate_rq(q, rq);
 	}
@@ -685,7 +685,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 	if (unplug_it && blk_queue_plugged(q)) {
 		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
-				- q->in_flight;
+				- queue_in_flight(q);
 
 		if (nrq >= q->unplug_thresh)
 			__generic_unplug_device(q);
@@ -823,7 +823,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 	 * request is released from the driver, io must be done
 	 */
	if (blk_account_rq(rq)) {
-		q->in_flight--;
+		q->in_flight[rq_is_sync(rq)]--;
 		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
 			e->ops->elevator_completed_req_fn(q, rq);
 	}
@@ -838,7 +838,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 		if (!list_empty(&q->queue_head))
 			next = list_entry_rq(q->queue_head.next);
 
-		if (!q->in_flight &&
+		if (!queue_in_flight(q) &&
 		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
 		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
 			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
......
@@ -404,7 +404,7 @@ struct request_queue
 	struct list_head	tag_busy_list;
 
 	unsigned int		nr_sorted;
-	unsigned int		in_flight;
+	unsigned int		in_flight[2];
 
 	unsigned int		rq_timeout;
 	struct timer_list	timeout;
@@ -511,6 +511,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag,
 	__clear_bit(flag, &q->queue_flags);
 }
 
+static inline int queue_in_flight(struct request_queue *q)
+{
+	return q->in_flight[0] + q->in_flight[1];
+}
+
 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
 	WARN_ON_ONCE(!queue_is_locked(q));
......