/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed into sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make any
 * difference.  The requests are either completed immediately if there's no
 * data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH is
 * translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * q->flush_queue[q->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream of
 *     FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
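 *
 * As an illustrative example (not part of the rules above): a
 * REQ_FLUSH|REQ_FUA write with data on a queue that advertises REQ_FLUSH
 * but not REQ_FUA gets policy PREFLUSH | DATA | POSTFLUSH from
 * blk_flush_policy() and walks those steps in order, while the same
 * request on a FUA-capable queue skips POSTFLUSH and keeps REQ_FUA set on
 * the data request.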
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q);

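/*
 * Map @rq and the queue's advertised flush capabilities (@fflags) to the
 * set of REQ_FSEQ_* steps this request actually needs.
 */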
static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (blk_rq_sectors(rq))
			policy |= REQ_FSEQ_DATA;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

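/* Return the next pending step of @rq's flush sequence (lowest unset bit). */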
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}

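/*
 * Undo the adjustments made for flush sequencing so that @rq can go through
 * the normal completion path.
 */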
static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = NULL;
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
				   int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	bool queued = false;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			q->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
		list_add(&rq->queuelist, &q->queue_head);
		queued = true;
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_flush_issue() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	return blk_kick_flush(q) | queued;
}

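/*
 * Completion handler for the flush request itself.  Advances every request
 * that was waiting on this flush to its next sequence step and kicks the
 * queue when needed.
 */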
static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running = &q->flush_queue[q->flush_running_idx];
	bool queued = false;
	struct request *rq, *n;

	BUG_ON(q->flush_pending_idx == q->flush_running_idx);

	/* account completion of the flush request */
	q->flush_running_idx ^= 1;
	elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, seq, error);
	}

	/*
	 * Kick the queue to avoid stall in two cases:
	 * 1. Moving a request silently to an empty queue_head may stall the
	 * queue.
	 * 2. When a flush request is running on a non-queueable queue, the
	 * queue is held.  Restart the queue after the flush request finishes
	 * to avoid the stall.
	 * This function is called from the request completion path, and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || q->flush_queue_delayed)
		blk_run_queue_async(q);
	q->flush_queue_delayed = 0;
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q)
{
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);

	/* C1 described at the top of this file */
	if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&q->flush_data_in_flight) &&
	    time_before(jiffies,
			q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	blk_rq_init(q, &q->flush_rq);
	q->flush_rq.cmd_type = REQ_TYPE_FS;
	q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	q->flush_rq.rq_disk = first_rq->rq_disk;
	q->flush_rq.end_io = flush_end_io;

	q->flush_pending_idx ^= 1;
	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
	return true;
}

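/* Completion handler for the data step of a sequenced FLUSH/FUA request. */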
static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);

	BUG_ON(rq->end_io);
	BUG_ON(!rq->bio || rq->bio != rq->biotail);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blk_abort_flushes - @q is being aborted, abort flush requests
 * @q: request_queue being aborted
 *
 * To be called from elv_abort_queue().  @q is being aborted.  Prepare all
 * FLUSH/FUA requests for abortion.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock)
 */
void blk_abort_flushes(struct request_queue *q)
{
	struct request *rq, *n;
	int i;

	/*
	 * Requests in flight for data are already owned by the dispatch
	 * queue or the device driver.  Just restore for normal completion.
	 */
	list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
	}

	/*
	 * We need to give away requests on flush queues.  Restore for
	 * normal completion and put them on the dispatch queue.
	 */
	for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
		list_for_each_entry_safe(rq, n, &q->flush_queue[i],
					 flush.list) {
			list_del_init(&rq->flush.list);
			blk_flush_restore_request(rq);
			list_add_tail(&rq->queuelist, &q->queue_head);
		}
	}
}

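/* Completion handler for the empty bio submitted by blkdev_issue_flush(). */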
static void bio_end_flush(struct bio *bio, int err)
{
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.  If the WAIT flag is not passed, the caller may only check that
 *    the request was pushed onto some internal queue for later handling.
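 *
 *    A minimal usage sketch (illustrative only; @sb stands in for a
 *    caller-owned super_block):
 *
 *	ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
 *	if (ret)
 *		return ret;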
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_flush;
	bio->bi_bdev = bdev;
	bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_FLUSH, bio);
	wait_for_completion(&wait);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_sector;

	if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);