/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/**
 * blk_queue_ordered - does this queue support ordered writes
 * @q:        the request queue
 * @ordered:  one of QUEUE_ORDERED_*
 * @prepare_flush_fn: rq setup helper for cache flush ordered writes
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win. Block drivers supporting this
 *   feature should call this function and indicate so.
 *
 **/
int blk_queue_ordered(struct request_queue *q, unsigned ordered,
		      prepare_flush_fn *prepare_flush_fn)
{
	if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
					     QUEUE_ORDERED_DO_POSTFLUSH))) {
		printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
		return -EINVAL;
	}

	if (ordered != QUEUE_ORDERED_NONE &&
	    ordered != QUEUE_ORDERED_DRAIN &&
	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
	    ordered != QUEUE_ORDERED_TAG &&
	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
	    ordered != QUEUE_ORDERED_TAG_FUA) {
		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
		return -EINVAL;
	}

	q->ordered = ordered;
	q->next_ordered = ordered;
	q->prepare_flush_fn = prepare_flush_fn;

	return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
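
/*
 * Illustrative usage sketch (not part of the original file): a driver
 * whose device has a write-back cache would typically register ordered
 * write support from its probe/init path.  The names my_queue and
 * my_prepare_flush below are hypothetical; a real prepare_flush_fn
 * fills @rq with whatever cache-flush command the hardware expects
 * (SCSI disks, for example, issue SYNCHRONIZE CACHE here):
 *
 *	static void my_prepare_flush(struct request_queue *q,
 *				     struct request *rq)
 *	{
 *		rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *		rq->cmd[0] = SYNCHRONIZE_CACHE;
 *		rq->cmd_len = 10;
 *	}
 *
 *	blk_queue_ordered(my_queue, QUEUE_ORDERED_DRAIN_FLUSH,
 *			  my_prepare_flush);
 */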

/*
 * Cache flushing for ordered writes handling
 */
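
/*
 * Illustrative note (assumes the QUEUE_ORDSEQ_* stage flags in blk.h
 * are defined in ascending bit order, as they are in this kernel
 * series): blk_ordered_cur_seq() reports the lowest stage that has not
 * yet completed.  For example, with STARTED, DRAIN and PREFLUSH already
 * set in q->ordseq, ffz() picks the next clear bit and the function
 * returns QUEUE_ORDSEQ_BAR.
 */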
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
	if (!q->ordseq)
		return 0;
	return 1 << ffz(q->ordseq);
}

unsigned blk_ordered_req_seq(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(q->ordseq == 0);

	if (rq == &q->pre_flush_rq)
		return QUEUE_ORDSEQ_PREFLUSH;
	if (rq == &q->bar_rq)
		return QUEUE_ORDSEQ_BAR;
	if (rq == &q->post_flush_rq)
		return QUEUE_ORDSEQ_POSTFLUSH;

	/*
	 * !fs requests don't need to follow barrier ordering.  Always
	 * put them at the front.  This fixes the following deadlock.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/537473
	 */
	if (rq->cmd_type != REQ_TYPE_FS)
		return QUEUE_ORDSEQ_DRAIN;

	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
		return QUEUE_ORDSEQ_DRAIN;
	else
		return QUEUE_ORDSEQ_DONE;
}

bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
	struct request *rq;

	if (error && !q->orderr)
		q->orderr = error;

	BUG_ON(q->ordseq & seq);
	q->ordseq |= seq;

	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
		return false;

	/*
	 * Okay, sequence complete.
	 */
	q->ordseq = 0;
	rq = q->orig_bar_rq;
	__blk_end_request_all(rq, q->orderr);
	return true;
}

static void pre_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

static void queue_flush(struct request_queue *q, unsigned which)
{
	struct request *rq;
	rq_end_io_fn *end_io;

	if (which == QUEUE_ORDERED_DO_PREFLUSH) {
		rq = &q->pre_flush_rq;
		end_io = pre_flush_end_io;
	} else {
		rq = &q->post_flush_rq;
		end_io = post_flush_end_io;
	}

	blk_rq_init(q, rq);
	rq->cmd_flags = REQ_HARDBARRIER;
	rq->rq_disk = q->bar_rq.rq_disk;
	rq->end_io = end_io;
	q->prepare_flush_fn(q, rq);

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	unsigned skip = 0;

	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * For an empty barrier, there's no actual BAR request, which
	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
	 */
	if (!blk_rq_sectors(rq)) {
		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
				QUEUE_ORDERED_DO_POSTFLUSH);
		/*
		 * Empty barrier on a write-through device w/ ordered
		 * tag has no command to issue and without any command
		 * to issue, ordering by tag can't be used.  Drain
		 * instead.
		 */
		if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
		    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
			q->ordered &= ~QUEUE_ORDERED_BY_TAG;
			q->ordered |= QUEUE_ORDERED_BY_DRAIN;
		}
	}

	/* stash away the original request */
	blk_dequeue_request(rq);
	q->orig_bar_rq = rq;
	rq = NULL;

	/*
	 * Queue ordered sequence.  As we stack them at the head, we
	 * need to queue in reverse order.  Note that we rely on that
	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
	 * request gets in between the ordered sequence.
	 */
	if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
		rq = &q->post_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_POSTFLUSH;

	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
		rq = &q->bar_rq;

		/* initialize proxy request and queue it */
		blk_rq_init(q, rq);
		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
			rq->cmd_flags |= REQ_RW;
		if (q->ordered & QUEUE_ORDERED_DO_FUA)
			rq->cmd_flags |= REQ_FUA;
		init_request_from_bio(rq, q->orig_bar_rq->bio);
		rq->end_io = bar_end_io;

		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	} else
		skip |= QUEUE_ORDSEQ_BAR;

	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_PREFLUSH;

	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
		rq = NULL;
	else
		skip |= QUEUE_ORDSEQ_DRAIN;

	*rqp = rq;

	/*
	 * Complete the skipped sequences.  If the whole sequence is complete,
	 * return false to tell the elevator that this request is gone.
	 */
	return !blk_ordered_complete_seq(q, skip, 0);
}

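/*
 * Calling convention: this returns false only when the request has been
 * consumed outright (ordering not supported, or the skipped stages
 * already completed the whole sequence); otherwise it returns true,
 * with *rqp set to NULL when the caller must hold off dispatching until
 * the ordered sequence advances.
 */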
bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
				(rq->cmd_flags & REQ_HARDBARRIER);

	if (!q->ordseq) {
		if (!is_barrier)
			return true;

		if (q->next_ordered != QUEUE_ORDERED_NONE)
			return start_ordered(q, rqp);
		else {
			/*
			 * Queue ordering not supported.  Terminate
			 * with prejudice.
			 */
			blk_dequeue_request(rq);
			__blk_end_request_all(rq, -EOPNOTSUPP);
			*rqp = NULL;
			return false;
		}
	}

	/*
	 * Ordered sequence in progress
	 */

	/* Special requests are not subject to ordering rules. */
	if (rq->cmd_type != REQ_TYPE_FS &&
	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
		return true;

	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
		/* Ordered by tag.  Blocking the next barrier is enough. */
		if (is_barrier && rq != &q->bar_rq)
			*rqp = NULL;
	} else {
		/* Ordered by draining.  Wait for turn. */
		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
			*rqp = NULL;
	}

	return true;
}

static void bio_end_empty_barrier(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to. If the BLKDEV_IFL_WAIT flag is not passed, the flush is not
 *    waited for; the caller may only assume that the request has been pushed
 *    onto some internal queue for later handling.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_empty_barrier;
	bio->bi_bdev = bdev;
	if (test_bit(BLKDEV_WAIT, &flags))
		bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_BARRIER, bio);
	if (test_bit(BLKDEV_WAIT, &flags)) {
		wait_for_completion(&wait);
		/*
		 * The driver must store the error location in ->bi_sector, if
		 * it supports it. For non-stacked drivers, this should be
		 * copied from blk_rq_pos(rq).
		 */
		if (error_sector)
			*error_sector = bio->bi_sector;
	}

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	else if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
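
/*
 * Usage sketch (illustrative only): a caller that wants to flush a
 * device's write cache and wait for the result would typically do
 * something like the following, where BLKDEV_IFL_WAIT comes from
 * <linux/blkdev.h> and -EOPNOTSUPP is commonly treated as harmless
 * (the device does not support cache flushes):
 *
 *	int ret = blkdev_issue_flush(bdev, GFP_KERNEL, NULL,
 *				     BLKDEV_IFL_WAIT);
 *	if (ret == -EOPNOTSUPP)
 *		ret = 0;
 */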