Commit 2a75184d authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Small collection of fixes for 3.14-rc. It contains:

   - Three minor updates to blk-mq from Christoph.

   - Reduce number of unaligned (< 4kb) in-flight writes on mtip32xx to
     two.  From Micron.

   - Make the blk-mq CPU notify spinlock raw, since it can't be a
     sleeper spinlock on RT.  From Mike Galbraith.

   - Drop now bogus BUG_ON() for bio iteration with blk integrity.  From
     Nic Bellinger.

   - Properly propagate the SYNC flag on requests. From Shaohua"
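
For context on the partial-completion update from Christoph mentioned above: blk_mq_end_io() is re-expressed as a wrapper around a new blk_mq_end_io_partial() helper, which returns true while the request still has bytes outstanding and false once the request has been ended and freed (see the header hunk at the end of the diff). What follows is a minimal, hypothetical driver-side sketch of how such a helper could be used; my_driver_complete() and my_driver_resubmit() are illustrative names only and are not part of this series.

	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>

	/* hypothetical helper, not part of this series: reissues what is left of rq */
	static void my_driver_resubmit(struct request *rq);

	/*
	 * Hypothetical completion path (illustration only): complete just the
	 * bytes that actually finished.  blk_mq_end_io_partial() returns true
	 * while the request still has bytes outstanding; once everything is
	 * done, blk-mq ends and frees the request and it returns false.
	 */
	static void my_driver_complete(struct request *rq, int error,
				       unsigned int done_bytes)
	{
		if (blk_mq_end_io_partial(rq, error, done_bytes))
			my_driver_resubmit(rq);
	}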

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: add REQ_SYNC early
  rt,blk,mq: Make blk_mq_cpu_notify_lock a raw spinlock
  bio-integrity: Drop bio_integrity_verify BUG_ON in post bip->bip_iter world
  blk-mq: support partial I/O completions
  blk-mq: merge blk_mq_insert_request and blk_mq_run_request
  blk-mq: remove blk_mq_alloc_rq
  mtip32xx: Reduce the number of unaligned writes to 2
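
For quick reference, the prototype change behind "blk-mq: merge blk_mq_insert_request and blk_mq_run_request", condensed from the hunks further down (parameter names taken from the new definition in the diff):

	/* before: two entry points, the first needing the queue passed explicitly */
	void blk_mq_insert_request(struct request_queue *q, struct request *rq,
				   bool at_head, bool run_queue);
	void blk_mq_run_request(struct request *rq, bool run_queue, bool async);

	/* after: a single entry point; the queue is taken from rq->q */
	void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
				   bool async);
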
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
* be reused after dying flag is set
*/
if (q->mq_ops) {
blk_mq_insert_request(q, rq, at_head, true);
blk_mq_insert_request(rq, at_head, true, false);
return;
}
......
@@ -137,7 +137,7 @@ static void mq_flush_run(struct work_struct *work)
rq = container_of(work, struct request, mq_flush_work);
memset(&rq->csd, 0, sizeof(rq->csd));
blk_mq_run_request(rq, true, false);
blk_mq_insert_request(rq, false, true, false);
}
static bool blk_flush_queue_rq(struct request *rq)
@@ -411,7 +411,7 @@ void blk_insert_flush(struct request *rq)
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
if (q->mq_ops) {
blk_mq_run_request(rq, false, true);
blk_mq_insert_request(rq, false, false, true);
} else
list_add_tail(&rq->queuelist, &q->queue_head);
return;
......
@@ -11,7 +11,7 @@
#include "blk-mq.h"
static LIST_HEAD(blk_mq_cpu_notify_list);
static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
static int blk_mq_main_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
@@ -19,12 +19,12 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
unsigned int cpu = (unsigned long) hcpu;
struct blk_mq_cpu_notifier *notify;
spin_lock(&blk_mq_cpu_notify_lock);
raw_spin_lock(&blk_mq_cpu_notify_lock);
list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
notify->notify(notify->data, action, cpu);
spin_unlock(&blk_mq_cpu_notify_lock);
raw_spin_unlock(&blk_mq_cpu_notify_lock);
return NOTIFY_OK;
}
@@ -32,16 +32,16 @@ void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
BUG_ON(!notifier->notify);
spin_lock(&blk_mq_cpu_notify_lock);
raw_spin_lock(&blk_mq_cpu_notify_lock);
list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
spin_unlock(&blk_mq_cpu_notify_lock);
raw_spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
spin_lock(&blk_mq_cpu_notify_lock);
raw_spin_lock(&blk_mq_cpu_notify_lock);
list_del(&notifier->list);
spin_unlock(&blk_mq_cpu_notify_lock);
raw_spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
......
@@ -73,8 +73,8 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
set_bit(ctx->index_hw, hctx->ctx_map);
}
static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
bool reserved)
static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
gfp_t gfp, bool reserved)
{
struct request *rq;
unsigned int tag;
@@ -193,12 +193,6 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
}
static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
gfp_t gfp, bool reserved)
{
return blk_mq_alloc_rq(hctx, gfp, reserved);
}
static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
int rw, gfp_t gfp,
bool reserved)
@@ -289,38 +283,10 @@ void blk_mq_free_request(struct request *rq)
__blk_mq_free_request(hctx, ctx, rq);
}
static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
{
if (error)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
error = -EIO;
if (unlikely(rq->cmd_flags & REQ_QUIET))
set_bit(BIO_QUIET, &bio->bi_flags);
/* don't actually finish bio if it's part of flush sequence */
if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
bio_endio(bio, error);
}
void blk_mq_end_io(struct request *rq, int error)
bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes)
{
struct bio *bio = rq->bio;
unsigned int bytes = 0;
trace_block_rq_complete(rq->q, rq);
while (bio) {
struct bio *next = bio->bi_next;
bio->bi_next = NULL;
bytes += bio->bi_iter.bi_size;
blk_mq_bio_endio(rq, bio, error);
bio = next;
}
blk_account_io_completion(rq, bytes);
if (blk_update_request(rq, error, blk_rq_bytes(rq)))
return true;
blk_account_io_done(rq);
@@ -328,8 +294,9 @@ void blk_mq_end_io(struct request *rq, int error)
rq->end_io(rq, error);
else
blk_mq_free_request(rq);
return false;
}
EXPORT_SYMBOL(blk_mq_end_io);
EXPORT_SYMBOL(blk_mq_end_io_partial);
static void __blk_mq_complete_request_remote(void *data)
{
@@ -730,60 +697,27 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
blk_mq_add_timer(rq);
}
void blk_mq_insert_request(struct request_queue *q, struct request *rq,
bool at_head, bool run_queue)
void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
bool async)
{
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx, *current_ctx;
struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
current_ctx = blk_mq_get_ctx(q);
if (!cpu_online(ctx->cpu))
rq->mq_ctx = ctx = current_ctx;
ctx = rq->mq_ctx;
hctx = q->mq_ops->map_queue(q, ctx->cpu);
if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
!(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
blk_insert_flush(rq);
} else {
current_ctx = blk_mq_get_ctx(q);
if (!cpu_online(ctx->cpu)) {
ctx = current_ctx;
hctx = q->mq_ops->map_queue(q, ctx->cpu);
rq->mq_ctx = ctx;
}
spin_lock(&ctx->lock);
__blk_mq_insert_request(hctx, rq, at_head);
spin_unlock(&ctx->lock);
blk_mq_put_ctx(current_ctx);
}
if (run_queue)
__blk_mq_run_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_insert_request);
/*
* This is a special version of blk_mq_insert_request to bypass FLUSH request
* check. Should only be used internally.
*/
void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
{
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx, *current_ctx;
current_ctx = blk_mq_get_ctx(q);
ctx = rq->mq_ctx;
if (!cpu_online(ctx->cpu)) {
ctx = current_ctx;
rq->mq_ctx = ctx;
}
hctx = q->mq_ops->map_queue(q, ctx->cpu);
/* ctx->cpu might be offline */
spin_lock(&ctx->lock);
__blk_mq_insert_request(hctx, rq, false);
spin_unlock(&ctx->lock);
blk_mq_put_ctx(current_ctx);
@@ -926,6 +860,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
if (is_sync)
rw |= REQ_SYNC;
trace_block_getrq(q, bio, rw);
rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
if (likely(rq))
......
@@ -23,7 +23,6 @@ struct blk_mq_ctx {
};
void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
void blk_mq_drain_queue(struct request_queue *q);
......
@@ -53,7 +53,7 @@
#define MTIP_FTL_REBUILD_TIMEOUT_MS 2400000
/* unaligned IO handling */
#define MTIP_MAX_UNALIGNED_SLOTS 8
#define MTIP_MAX_UNALIGNED_SLOTS 2
/* Macro to extract the tag bit number from a tag value. */
#define MTIP_TAG_BIT(tag) (tag & 0x1F)
......
@@ -458,11 +458,10 @@ static int bio_integrity_verify(struct bio *bio)
struct blk_integrity_exchg bix;
struct bio_vec *bv;
sector_t sector = bio->bi_integrity->bip_iter.bi_sector;
unsigned int sectors, total, ret;
unsigned int sectors, ret = 0;
void *prot_buf = bio->bi_integrity->bip_buf;
int i;
ret = total = 0;
bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
bix.sector_size = bi->sector_size;
@@ -484,8 +483,6 @@ static int bio_integrity_verify(struct bio *bio)
sectors = bv->bv_len / bi->sector_size;
sector += sectors;
prot_buf += sectors * bi->tuple_size;
total += sectors * bi->tuple_size;
BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
kunmap_atomic(kaddr);
}
......
@@ -121,8 +121,7 @@ void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struc
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
void blk_mq_insert_request(struct request_queue *, struct request *,
bool, bool);
void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
@@ -134,7 +133,13 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_ind
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
void blk_mq_end_io(struct request *rq, int error);
bool blk_mq_end_io_partial(struct request *rq, int error,
unsigned int nr_bytes);
static inline void blk_mq_end_io(struct request *rq, int error)
{
bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq));
BUG_ON(!done);
}
void blk_mq_complete_request(struct request *rq);
......