提交 2849450a 编写于 作者: M Mike Snitzer 提交者: Jens Axboe

blk-mq: introduce blk_mq_delay_kick_requeue_list()

blk_mq_delay_kick_requeue_list() provides the ability to kick the
q->requeue_list after a specified time.  To do this the request_queue's
'requeue_work' member was changed to a delayed_work.

blk_mq_delay_kick_requeue_list() allows DM to defer processing requeued
requests while it doesn't make sense to immediately requeue them
(e.g. when all paths in a DM multipath have failed).
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
上级 c5c5ca77
@@ -502,7 +502,7 @@ EXPORT_SYMBOL(blk_mq_requeue_request);
 static void blk_mq_requeue_work(struct work_struct *work)
 {
 	struct request_queue *q =
-		container_of(work, struct request_queue, requeue_work);
+		container_of(work, struct request_queue, requeue_work.work);
 	LIST_HEAD(rq_list);
 	struct request *rq, *next;
 	unsigned long flags;
@@ -557,16 +557,24 @@ EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
void blk_mq_cancel_requeue_work(struct request_queue *q) void blk_mq_cancel_requeue_work(struct request_queue *q)
{ {
cancel_work_sync(&q->requeue_work); cancel_delayed_work_sync(&q->requeue_work);
} }
EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work); EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
void blk_mq_kick_requeue_list(struct request_queue *q) void blk_mq_kick_requeue_list(struct request_queue *q)
{ {
kblockd_schedule_work(&q->requeue_work); kblockd_schedule_delayed_work(&q->requeue_work, 0);
} }
EXPORT_SYMBOL(blk_mq_kick_requeue_list); EXPORT_SYMBOL(blk_mq_kick_requeue_list);
/*
 * blk_mq_delay_kick_requeue_list - run the requeue list after a delay
 * @q: request queue to kick
 * @msecs: delay, in milliseconds, before the requeue list is processed
 *
 * Like blk_mq_kick_requeue_list(), but defers the kblockd work so callers
 * (e.g. DM multipath) can postpone requeue handling when immediate
 * re-dispatch would be pointless.
 */
void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	unsigned long delay = msecs_to_jiffies(msecs);

	kblockd_schedule_delayed_work(&q->requeue_work, delay);
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
 void blk_mq_abort_requeue_list(struct request_queue *q)
 {
 	unsigned long flags;
@@ -2084,7 +2092,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	q->sg_reserved_size = INT_MAX;
 
-	INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
+	INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
 	INIT_LIST_HEAD(&q->requeue_list);
 	spin_lock_init(&q->requeue_lock);
......
@@ -233,6 +233,7 @@ void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
 void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 void blk_mq_abort_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq, int error);
......
@@ -449,7 +449,7 @@ struct request_queue {
 	struct list_head	requeue_list;
 	spinlock_t		requeue_lock;
-	struct work_struct	requeue_work;
+	struct delayed_work	requeue_work;
 
 	struct mutex		sysfs_lock;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册