/*
 * blk-barrier.c - Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/**
 * blk_queue_ordered - set up ordered write handling on a queue
 * @q:        the request queue
 * @ordered:  one of QUEUE_ORDERED_*
 * @prepare_flush_fn: rq setup helper for cache flush ordered writes
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win. Block drivers supporting this
 *   feature should call this function and indicate so.
 *
 **/
int blk_queue_ordered(struct request_queue *q, unsigned ordered,
		      prepare_flush_fn *prepare_flush_fn)
{
	if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
					     QUEUE_ORDERED_DO_POSTFLUSH))) {
		printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
		return -EINVAL;
	}

	if (ordered != QUEUE_ORDERED_NONE &&
	    ordered != QUEUE_ORDERED_DRAIN &&
	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
	    ordered != QUEUE_ORDERED_TAG &&
	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
	    ordered != QUEUE_ORDERED_TAG_FUA) {
		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
		return -EINVAL;
	}

	q->ordered = ordered;
	q->next_ordered = ordered;
	q->prepare_flush_fn = prepare_flush_fn;

	return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
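
/*
 * Illustrative example only (not part of the original file): a hypothetical
 * driver "mydrv" with a volatile write cache might register flush-based
 * ordering from its probe path, supplying a prepare_flush_fn that turns the
 * pre-allocated proxy request into a cache flush command:
 *
 *	static void mydrv_prepare_flush(struct request_queue *q,
 *					struct request *rq)
 *	{
 *		rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *		rq->timeout = MYDRV_FLUSH_TIMEOUT;
 *		rq->cmd[0] = MYDRV_FLUSH_OPCODE;
 *		rq->cmd_len = 10;
 *	}
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydrv_prepare_flush);
 *
 * A write-through device that only needs queue draining could instead pass
 * QUEUE_ORDERED_DRAIN with a NULL prepare_flush_fn.
 */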

/*
 * Cache flushing for ordered writes handling
 */
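/*
 * An ordered sequence walks through the QUEUE_ORDSEQ_* stages in order
 * (drain, pre-flush, bar, post-flush).  Each completed stage is recorded
 * as a bit in q->ordseq; blk_ordered_cur_seq() reports the lowest stage
 * that has not finished yet, and blk_ordered_complete_seq() marks stages
 * done and completes the original barrier request once all are finished.
 */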
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
	if (!q->ordseq)
		return 0;
	return 1 << ffz(q->ordseq);
}

unsigned blk_ordered_req_seq(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(q->ordseq == 0);

	if (rq == &q->pre_flush_rq)
		return QUEUE_ORDSEQ_PREFLUSH;
	if (rq == &q->bar_rq)
		return QUEUE_ORDSEQ_BAR;
	if (rq == &q->post_flush_rq)
		return QUEUE_ORDSEQ_POSTFLUSH;

	/*
	 * !fs requests don't need to follow barrier ordering.  Always
	 * put them at the front.  This fixes the following deadlock.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/537473
	 */
	if (!blk_fs_request(rq))
		return QUEUE_ORDSEQ_DRAIN;

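	/*
	 * The ordered color is flipped by the elevator each time a barrier
	 * request is queued, so an fs request carrying the same color as
	 * the original barrier was issued before it and is drained first,
	 * while a differing color means it arrived after the barrier and
	 * must wait until the whole ordered sequence has finished.
	 */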
	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
		return QUEUE_ORDSEQ_DRAIN;
	else
		return QUEUE_ORDSEQ_DONE;
}

bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
	struct request *rq;

	if (error && !q->orderr)
		q->orderr = error;

	BUG_ON(q->ordseq & seq);
	q->ordseq |= seq;

	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
		return false;

	/*
	 * Okay, sequence complete.
	 */
	q->ordseq = 0;
	rq = q->orig_bar_rq;
	__blk_end_request_all(rq, q->orderr);
	return true;
}

static void pre_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

static void queue_flush(struct request_queue *q, unsigned which)
{
	struct request *rq;
	rq_end_io_fn *end_io;

	if (which == QUEUE_ORDERED_DO_PREFLUSH) {
		rq = &q->pre_flush_rq;
		end_io = pre_flush_end_io;
	} else {
		rq = &q->post_flush_rq;
		end_io = post_flush_end_io;
	}

	blk_rq_init(q, rq);
	rq->cmd_flags = REQ_HARDBARRIER;
	rq->rq_disk = q->bar_rq.rq_disk;
	rq->end_io = end_io;
	q->prepare_flush_fn(q, rq);

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	unsigned skip = 0;

	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * For an empty barrier, there's no actual BAR request, which
	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
	 */
	if (!blk_rq_sectors(rq)) {
		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
				QUEUE_ORDERED_DO_POSTFLUSH);
		/*
		 * Empty barrier on a write-through device w/ ordered
		 * tag has no command to issue and without any command
		 * to issue, ordering by tag can't be used.  Drain
		 * instead.
		 */
		if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
		    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
			q->ordered &= ~QUEUE_ORDERED_BY_TAG;
			q->ordered |= QUEUE_ORDERED_BY_DRAIN;
		}
	}

	/* stash away the original request */
	blk_dequeue_request(rq);
	q->orig_bar_rq = rq;
	rq = NULL;

	/*
	 * Queue ordered sequence.  As we stack them at the head, we
	 * need to queue in reverse order.  Note that we rely on that
	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
	 * request gets in between the ordered sequence.
	 */
	if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
		rq = &q->post_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_POSTFLUSH;

	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
		rq = &q->bar_rq;

		/* initialize proxy request and queue it */
		blk_rq_init(q, rq);
		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
			rq->cmd_flags |= REQ_RW;
		if (q->ordered & QUEUE_ORDERED_DO_FUA)
			rq->cmd_flags |= REQ_FUA;
		init_request_from_bio(rq, q->orig_bar_rq->bio);
		rq->end_io = bar_end_io;

		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	} else
		skip |= QUEUE_ORDSEQ_BAR;

	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_PREFLUSH;

	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
		rq = NULL;
	else
		skip |= QUEUE_ORDSEQ_DRAIN;

	*rqp = rq;

	/*
	 * Complete skipped sequences.  If whole sequence is complete,
	 * return false to tell elevator that this request is gone.
	 */
	return !blk_ordered_complete_seq(q, skip, 0);
}

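/*
 * blk_do_ordered() is called from the request dispatch path for each request
 * about to be handed to the driver.  It may substitute one of the barrier
 * sequence's proxy requests for *rqp, set *rqp to NULL to stall dispatch
 * until the current stage completes, or return false when the request has
 * been consumed (or failed) by barrier handling and must not be dispatched.
 */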
bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);

	if (!q->ordseq) {
		if (!is_barrier)
			return true;

		if (q->next_ordered != QUEUE_ORDERED_NONE)
			return start_ordered(q, rqp);
		else {
			/*
			 * Queue ordering not supported.  Terminate
			 * with prejudice.
			 */
			blk_dequeue_request(rq);
			__blk_end_request_all(rq, -EOPNOTSUPP);
			*rqp = NULL;
			return false;
		}
	}

	/*
	 * Ordered sequence in progress
	 */

	/* Special requests are not subject to ordering rules. */
	if (!blk_fs_request(rq) &&
	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
		return true;

	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
		/* Ordered by tag.  Blocking the next barrier is enough. */
		if (is_barrier && rq != &q->bar_rq)
			*rqp = NULL;
	} else {
		/* Ordered by draining.  Wait for turn. */
		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
			*rqp = NULL;
	}

	return true;
}

static void bio_end_empty_barrier(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}

	complete(bio->bi_private);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @error_sector:	optional location to store the sector where an error occurred
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = bio_end_empty_barrier;
	bio->bi_private = &wait;
	bio->bi_bdev = bdev;
	submit_bio(WRITE_BARRIER, bio);

	wait_for_completion(&wait);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be copied
	 * from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_sector;

	ret = 0;
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	else if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
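
/*
 * Illustrative example only: a filesystem that wants previously completed
 * writes to reach stable storage (e.g. at fsync time) could simply do:
 *
 *	ret = blkdev_issue_flush(sb->s_bdev, NULL);
 *
 * passing NULL when it does not care about the error sector.
 */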

static void blkdev_discard_end_io(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}

	if (bio->bi_private)
		complete(bio->bi_private);
	__free_page(bio_page(bio));

	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	DISCARD_FL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = flags & DISCARD_FL_BARRIER ?
		DISCARD_BARRIER : DISCARD_NOBARRIER;
	struct bio *bio;
	struct page *page;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	while (nr_sects && !ret) {
		unsigned int sector_size = q->limits.logical_block_size;
		unsigned int max_discard_sectors =
			min(q->limits.max_discard_sectors, UINT_MAX >> 9);

		bio = bio_alloc(gfp_mask, 1);
		if (!bio)
			goto out;
		bio->bi_sector = sector;
		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;
		if (flags & DISCARD_FL_WAIT)
			bio->bi_private = &wait;

		/*
		 * Add a zeroed one-sector payload as that's what
		 * our current implementations need.  If we'll ever need
		 * more the interface will need revisiting.
		 */
		page = alloc_page(gfp_mask | __GFP_ZERO);
		if (!page)
			goto out_free_bio;
		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
			goto out_free_page;

		/*
		 * And override the bio size - the way discard works we
		 * touch many more blocks on disk than the actual payload
		 * length.
		 */
		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		bio_get(bio);
		submit_bio(type, bio);

		if (flags & DISCARD_FL_WAIT)
			wait_for_completion(&wait);

		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}
	return ret;
out_free_page:
	__free_page(page);
out_free_bio:
	bio_put(bio);
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(blkdev_issue_discard);
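
/*
 * Illustrative example only: discarding 1MiB (2048 512-byte sectors)
 * starting at "sector" and waiting for the result might look like:
 *
 *	ret = blkdev_issue_discard(bdev, sector, 2048, GFP_KERNEL,
 *				   DISCARD_FL_WAIT);
 *
 * Adding DISCARD_FL_BARRIER would additionally issue the discards as
 * barrier requests.
 */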