diff --git a/block/blk-merge.c b/block/blk-merge.c
index b3ac40aef46b317c5a432a92ba9b40c8e1942504..89b97b5e0881853054c0807c5607c20d91d90762 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -97,19 +97,22 @@ void blk_recalc_rq_segments(struct request *rq)
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
-			&q->queue_flags);
-	bool merge_not_need = bio->bi_vcnt < queue_max_segments(q);
+	unsigned short seg_cnt;
+
+	/* estimate segment number by bi_vcnt for non-cloned bio */
+	if (bio_flagged(bio, BIO_CLONED))
+		seg_cnt = bio_segments(bio);
+	else
+		seg_cnt = bio->bi_vcnt;
 
-	if (no_sg_merge && !bio_flagged(bio, BIO_CLONED) &&
-			merge_not_need)
-		bio->bi_phys_segments = bio->bi_vcnt;
+	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+			(seg_cnt < queue_max_segments(q)))
+		bio->bi_phys_segments = seg_cnt;
 	else {
 		struct bio *nxt = bio->bi_next;
 
 		bio->bi_next = NULL;
-		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio,
-				no_sg_merge && merge_not_need);
+		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
 		bio->bi_next = nxt;
 	}
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 68929bad9a6a4048151f485cc1e44db2d655fc0f..1d016fc9a8b640c54ce7e06e9f1ce1f293b694e6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -107,11 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
 	wake_up_all(&q->mq_freeze_wq);
 }
 
-/*
- * Guarantee no request is in use, so we can change any data structure of
- * the queue afterward.
- */
-void blk_mq_freeze_queue(struct request_queue *q)
+static void blk_mq_freeze_queue_start(struct request_queue *q)
 {
 	bool freeze;
 
@@ -123,9 +119,23 @@ void blk_mq_freeze_queue(struct request_queue *q)
 		percpu_ref_kill(&q->mq_usage_counter);
 		blk_mq_run_queues(q, false);
 	}
+}
+
+static void blk_mq_freeze_queue_wait(struct request_queue *q)
+{
 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
 
+/*
+ * Guarantee no request is in use, so we can change any data structure of
+ * the queue afterward.
+ */
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+	blk_mq_freeze_queue_start(q);
+	blk_mq_freeze_queue_wait(q);
+}
+
 static void blk_mq_unfreeze_queue(struct request_queue *q)
 {
 	bool wake;
@@ -1921,7 +1931,7 @@ void blk_mq_free_queue(struct request_queue *q)
 /* Basically redo blk_mq_init_queue with queue frozen */
 static void blk_mq_queue_reinit(struct request_queue *q)
 {
-	blk_mq_freeze_queue(q);
+	WARN_ON_ONCE(!q->mq_freeze_depth);
 
 	blk_mq_sysfs_unregister(q);
 
@@ -1936,8 +1946,6 @@ static void blk_mq_queue_reinit(struct request_queue *q)
 	blk_mq_map_swqueue(q);
 
 	blk_mq_sysfs_register(q);
-
-	blk_mq_unfreeze_queue(q);
 }
 
 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
@@ -1956,8 +1964,25 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 		return NOTIFY_OK;
 
 	mutex_lock(&all_q_mutex);
+
+	/*
+	 * We need to freeze and reinit all existing queues. Freezing
+	 * involves synchronous wait for an RCU grace period and doing it
+	 * one by one may take a long time. Start freezing all queues in
+	 * one swoop and then wait for the completions so that freezing can
+	 * take place in parallel.
+	 */
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_mq_freeze_queue_start(q);
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_mq_freeze_queue_wait(q);
+
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_queue_reinit(q);
+
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_mq_unfreeze_queue(q);
+
 	mutex_unlock(&all_q_mutex);
 	return NOTIFY_OK;
 }
diff --git a/block/ioprio.c b/block/ioprio.c
index e50170ca7c33f446acc16e29a0d0097828919c30..31666c92b46af29919f42ea3e1093caed7127d71 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -157,14 +157,16 @@ static int get_task_ioprio(struct task_struct *p)
 
 int ioprio_best(unsigned short aprio, unsigned short bprio)
 {
-	unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
-	unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
+	unsigned short aclass;
+	unsigned short bclass;
 
-	if (aclass == IOPRIO_CLASS_NONE)
-		aclass = IOPRIO_CLASS_BE;
-	if (bclass == IOPRIO_CLASS_NONE)
-		bclass = IOPRIO_CLASS_BE;
+	if (!ioprio_valid(aprio))
+		aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+	if (!ioprio_valid(bprio))
+		bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
 
+	aclass = IOPRIO_PRIO_CLASS(aprio);
+	bclass = IOPRIO_PRIO_CLASS(bprio);
 	if (aclass == bclass)
 		return min(aprio, bprio);
 	if (aclass > bclass)
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 1e053d911240b577df324912bb68af13e6174bcc..b0c2a616c8f9b859191c075526c473ea1df796bc 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -458,7 +458,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
-		goto error;
+		goto error_free_buffer;
 	}
 	blk_rq_set_block_pc(rq);
 
@@ -531,9 +531,11 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 	}
 
 error:
+	blk_put_request(rq);
+
+error_free_buffer:
 	kfree(buffer);
-	if (rq)
-		blk_put_request(rq);
+
 	return err;
 }
 EXPORT_SYMBOL_GPL(sg_scsi_ioctl);