Commit d732580b authored by Tejun Heo, committed by Jens Axboe

block: implement blk_queue_bypass_start/end()

Rename and extend elv_quiesce_start/end() to
blk_queue_bypass_start/end(), which are exported and support nesting
via @q->bypass_depth.  Also add blk_queue_bypass() to test bypass
state.

This will be further extended and used for blkio_group management.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Parent: b2fab5ac
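The new entry points are meant to be bracketed around work that needs the queue to behave as a plain FIFO. Below is a minimal usage sketch, not part of the commit: the helper name is hypothetical, and it assumes block-layer code that can see the prototypes added to block/blk.h.

/* Hypothetical block-layer helper illustrating the new API. */
static void example_reconfigure_queue(struct request_queue *q)
{
	/*
	 * Bump q->bypass_depth, set QUEUE_FLAG_BYPASS and drain all
	 * requests that carry ELVPRIV.
	 */
	blk_queue_bypass_start(q);

	WARN_ON_ONCE(!blk_queue_bypass(q));

	/*
	 * ... swap elevator or blkio_group state while the queue acts
	 * as a dumb FIFO ...
	 */

	/*
	 * Clears QUEUE_FLAG_BYPASS only once bypass_depth returns to
	 * zero, so nested users are safe.
	 */
	blk_queue_bypass_end(q);
}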
block/blk-core.c
@@ -409,6 +409,42 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 	}
 }
 
+/**
+ * blk_queue_bypass_start - enter queue bypass mode
+ * @q: queue of interest
+ *
+ * In bypass mode, only the dispatch FIFO queue of @q is used.  This
+ * function makes @q enter bypass mode and drains all requests which were
+ * issued before.  On return, it's guaranteed that no request has ELVPRIV
+ * set.
+ */
+void blk_queue_bypass_start(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	q->bypass_depth++;
+	queue_flag_set(QUEUE_FLAG_BYPASS, q);
+	spin_unlock_irq(q->queue_lock);
+
+	blk_drain_queue(q, false);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
+
+/**
+ * blk_queue_bypass_end - leave queue bypass mode
+ * @q: queue of interest
+ *
+ * Leave bypass mode and restore the normal queueing behavior.
+ */
+void blk_queue_bypass_end(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	if (!--q->bypass_depth)
+		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+	WARN_ON_ONCE(q->bypass_depth < 0);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
+
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
@@ -862,8 +898,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
 	 * it will be created after releasing queue_lock.
	 */
-	if (blk_rq_should_init_elevator(bio) &&
-	    !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
 		rw_flags |= REQ_ELVPRIV;
 		rl->elvpriv++;
 		if (et->icq_cache && ioc)
block/blk.h
@@ -23,7 +23,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 			struct bio *bio);
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		struct bio *bio);
-void blk_drain_queue(struct request_queue *q, bool drain_all);
+void blk_queue_bypass_start(struct request_queue *q);
+void blk_queue_bypass_end(struct request_queue *q);
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
@@ -144,9 +145,6 @@ void blk_queue_congestion_threshold(struct request_queue *q);
 
 int blk_dev_init(void);
 
-void elv_quiesce_start(struct request_queue *q);
-void elv_quiesce_end(struct request_queue *q);
-
 /*
  * Return the threshold (number of used requests) at which the queue is
block/elevator.c
@@ -553,25 +553,6 @@ void elv_drain_elevator(struct request_queue *q)
 	}
 }
 
-void elv_quiesce_start(struct request_queue *q)
-{
-	if (!q->elevator)
-		return;
-
-	spin_lock_irq(q->queue_lock);
-	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
-	spin_unlock_irq(q->queue_lock);
-
-	blk_drain_queue(q, false);
-}
-
-void elv_quiesce_end(struct request_queue *q)
-{
-	spin_lock_irq(q->queue_lock);
-	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
-	spin_unlock_irq(q->queue_lock);
-}
-
 void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
 	trace_block_rq_insert(q, rq);
@@ -903,7 +884,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
 	 * merge happens either.
	 */
-	elv_quiesce_start(q);
+	blk_queue_bypass_start(q);
 
 	/* unregister and clear all auxiliary data of the old elevator */
 	if (registered)
@@ -933,7 +914,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
 	/* done, kill the old one and finish */
 	elevator_exit(old);
-	elv_quiesce_end(q);
+	blk_queue_bypass_end(q);
 
 	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
 
@@ -945,7 +926,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	/* switch failed, restore and re-register old elevator */
 	q->elevator = old;
 	elv_register_queue(q);
-	elv_quiesce_end(q);
+	blk_queue_bypass_end(q);
 
 	return err;
 }
include/linux/blkdev.h
@@ -389,6 +389,8 @@ struct request_queue {
 
 	struct mutex		sysfs_lock;
 
+	int			bypass_depth;
+
 #if defined(CONFIG_BLK_DEV_BSG)
 	bsg_job_fn		*bsg_job_fn;
 	int			bsg_job_size;
@@ -406,7 +408,7 @@ struct request_queue {
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
-#define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
 #define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES     8	/* disable merge attempts */
 #define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
@@ -494,6 +496,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
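Because blk_queue_bypass_end() clears QUEUE_FLAG_BYPASS only when bypass_depth drops back to zero, two independent bypass sections may overlap; that is the property the blkio_group management work mentioned in the commit message relies on. A short illustrative sketch of the intended nesting behavior (the overlapping callers are hypothetical, the functions are the ones added above):

	blk_queue_bypass_start(q);		/* depth 0 -> 1, flag set        */
	blk_queue_bypass_start(q);		/* depth 1 -> 2, nested user     */

	blk_queue_bypass_end(q);		/* depth 2 -> 1, flag still set  */
	WARN_ON_ONCE(!blk_queue_bypass(q));	/* queue is still in bypass mode */

	blk_queue_bypass_end(q);		/* depth 1 -> 0, flag cleared    */
	WARN_ON_ONCE(blk_queue_bypass(q));	/* normal queueing restored      */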