/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/blktrace_api.h>
#include <trace/block.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

DEFINE_TRACE(block_split);

static struct kmem_cache *bio_slab __read_mostly;

static mempool_t *bio_split_pool __read_mostly;

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */

#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[idx].nr_vecs;
}

struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
{
	struct bio_vec *bvl;

	/*
	 * If 'bs' is given, lookup the pool and do the mempool alloc.
	 * If not, this is a bio_kmalloc() allocation and just do a
	 * kzalloc() for the exact number of vecs right away.
	 */
	if (bs) {
		/*
		 * see comment near bvec_array define!
		 */
		switch (nr) {
		case 1:
			*idx = 0;
			break;
		case 2 ... 4:
			*idx = 1;
			break;
		case 5 ... 16:
			*idx = 2;
			break;
		case 17 ... 64:
			*idx = 3;
			break;
		case 65 ... 128:
			*idx = 4;
			break;
		case 129 ... BIO_MAX_PAGES:
			*idx = 5;
			break;
		default:
			return NULL;
		}

		/*
		 * idx now points to the pool we want to allocate from
		 */
		bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
		if (bvl)
			memset(bvl, 0,
				bvec_nr_vecs(*idx) * sizeof(struct bio_vec));
	} else
		bvl = kzalloc(nr * sizeof(struct bio_vec), gfp_mask);

	return bvl;
}

void bio_free(struct bio *bio, struct bio_set *bio_set)
{
	if (bio->bi_io_vec) {
		const int pool_idx = BIO_POOL_IDX(bio);

		BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);

		mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
	}

	if (bio_integrity(bio))
		bio_integrity_free(bio, bio_set);

	mempool_free(bio, bio_set->bio_pool);
}

/*
 * default destructor for a bio allocated with bio_alloc_bioset()
 */
static void bio_fs_destructor(struct bio *bio)
{
	bio_free(bio, fs_bio_set);
}

static void bio_kmalloc_destructor(struct bio *bio)
{
	kfree(bio->bi_io_vec);
	kfree(bio);
}

void bio_init(struct bio *bio)
{
	memset(bio, 0, sizeof(*bio));
	bio->bi_flags = 1 << BIO_UPTODATE;
	bio->bi_comp_cpu = -1;
	atomic_set(&bio->bi_cnt, 1);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from. If %NULL, just use kmalloc
 *
 * Description:
 *   bio_alloc_bioset will first try its own mempool to satisfy the allocation.
 *   If %__GFP_WAIT is set then we will block on the internal pool waiting
 *   for a &struct bio to become free. If a %NULL @bs is passed in, we will
 *   fall back to just using @kmalloc to allocate the required memory.
 *
 *   allocate bio and iovecs from the memory pools specified by the
 *   bio_set structure, or @kmalloc if none given.
 **/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	struct bio *bio;

	if (bs)
		bio = mempool_alloc(bs->bio_pool, gfp_mask);
	else
		bio = kmalloc(sizeof(*bio), gfp_mask);

	if (likely(bio)) {
		struct bio_vec *bvl = NULL;

		bio_init(bio);
		if (likely(nr_iovecs)) {
			unsigned long uninitialized_var(idx);

			bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
			if (unlikely(!bvl)) {
				if (bs)
					mempool_free(bio, bs->bio_pool);
				else
					kfree(bio);
				bio = NULL;
				goto out;
			}
			bio->bi_flags |= idx << BIO_POOL_OFFSET;
			bio->bi_max_vecs = bvec_nr_vecs(idx);
		}
		bio->bi_io_vec = bvl;
	}
out:
	return bio;
}

struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
{
	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);

	if (bio)
		bio->bi_destructor = bio_fs_destructor;

	return bio;
}
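
/*
 * Usage sketch (illustrative only, not part of this file): a short-lived
 * bio is typically allocated with bio_alloc(), filled in, submitted, and
 * dropped with bio_put() once the caller is done with its reference. The
 * example_* names and the target device/sector below are hypothetical.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	if (bio) {
 *		bio->bi_bdev = example_bdev;
 *		bio->bi_sector = example_sector;
 *		bio_add_page(bio, example_page, PAGE_SIZE, 0);
 *		bio->bi_end_io = example_end_io;
 *		bio->bi_private = example_ctx;
 *		submit_bio(READ, bio);
 *	}
 */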

/*
 * Like bio_alloc(), but doesn't use a mempool backing. This means that
 * it CAN fail, but while bio_alloc() can only be used for allocations
 * that have a short (finite) life span, bio_kmalloc() should be used
 * for more permanent bio allocations (like allocating some bios for
 * initialization or setup purposes).
 */
struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
{
	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);

	if (bio)
		bio->bi_destructor = bio_kmalloc_destructor;

	return bio;
}
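
/*
 * Sketch of the intended split between the two allocators (an assumption
 * drawn from the comment above, not additional API): bio_kmalloc() for a
 * long-lived bio set up once at initialization time, bio_alloc() for
 * short-lived I/O. The example_* names are hypothetical. bio_kmalloc()
 * can fail outright, since it has no mempool backing.
 *
 *	struct bio *setup_bio = bio_kmalloc(GFP_KERNEL, 4);
 *
 *	if (!setup_bio)
 *		return -ENOMEM;
 *	example_driver->setup_bio = setup_bio;
 */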

void zero_fill_bio(struct bio *bio)
{
	unsigned long flags;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment(bv, bio, i) {
		char *data = bvec_kmap_irq(bv, &flags);
		memset(data, 0, bv->bv_len);
		flush_dcache_page(bv->bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc or bio_get. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	BIO_BUG_ON(!atomic_read(&bio->bi_cnt));

	/*
	 * last put frees it
	 */
	if (atomic_dec_and_test(&bio->bi_cnt)) {
		bio->bi_next = NULL;
		bio->bi_destructor(bio);
	}
}

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}

/**
 * 	__bio_clone	-	clone a bio
 * 	@bio: destination bio
 * 	@bio_src: bio to clone
 *
 *	Clone a &bio. Caller will own the returned bio, but not
 *	the actual data it points to. Reference count of returned
 * 	bio will be one.
 */
void __bio_clone(struct bio *bio, struct bio *bio_src)
{
	memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
		bio_src->bi_max_vecs * sizeof(struct bio_vec));
	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_sector = bio_src->bi_sector;
	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_flags |= 1 << BIO_CLONED;
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_vcnt = bio_src->bi_vcnt;
	bio->bi_size = bio_src->bi_size;
	bio->bi_idx = bio_src->bi_idx;
}

/**
 *	bio_clone	-	clone a bio
 *	@bio: bio to clone
 *	@gfp_mask: allocation priority
 *
 * 	Like __bio_clone, only also allocates the returned bio
 */
struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);

	if (!b)
		return NULL;

	b->bi_destructor = bio_fs_destructor;
	__bio_clone(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, fs_bio_set);

		if (ret < 0)
			return NULL;
	}

	return b;
}
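
/*
 * Usage sketch (illustrative): stacking drivers commonly clone a bio,
 * redirect the clone, and submit it, completing the original from the
 * clone's end_io handler. The example_* names below are hypothetical.
 *
 *	struct bio *clone = bio_clone(bio, GFP_NOIO);
 *
 *	if (clone) {
 *		clone->bi_bdev = example_lower_bdev;
 *		clone->bi_sector += example_offset;
 *		clone->bi_end_io = example_clone_end_io;
 *		clone->bi_private = bio;
 *		generic_make_request(clone);
 *	}
 */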

/**
 *	bio_get_nr_vecs		- return approx number of vecs
 *	@bdev:  I/O target
 *
 *	Return the approximate number of pages we can send to this target.
 *	There's no guarantee that you will be able to fit this number of pages
 *	into a bio, it does not account for dynamic restrictions that vary
 *	on offset.
 */
int bio_get_nr_vecs(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int nr_pages;

	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (nr_pages > q->max_phys_segments)
		nr_pages = q->max_phys_segments;
	if (nr_pages > q->max_hw_segments)
		nr_pages = q->max_hw_segments;

	return nr_pages;
}

static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
			  *page, unsigned int len, unsigned int offset,
			  unsigned short max_sectors)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_size + len) >> 9) > max_sectors)
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset.  Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			prev->bv_len += len;

			if (q->merge_bvec_fn) {
				struct bvec_merge_data bvm = {
					.bi_bdev = bio->bi_bdev,
					.bi_sector = bio->bi_sector,
					.bi_size = bio->bi_size,
					.bi_rw = bio->bi_rw,
				};

				if (q->merge_bvec_fn(q, &bvm, prev) < len) {
					prev->bv_len -= len;
					return 0;
				}
			}

			goto done;
		}
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	/*
	 * we might lose a segment or two here, but rather that than
	 * make this too complex.
	 */

	while (bio->bi_phys_segments >= q->max_phys_segments
	       || bio->bi_phys_segments >= q->max_hw_segments) {
		if (retried_segments)
			return 0;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;

	/*
	 * if queue has other restrictions (eg varying max sector size
	 * depending on offset), it can specify a merge_bvec_fn in the
	 * queue to get further control
	 */
	if (q->merge_bvec_fn) {
		struct bvec_merge_data bvm = {
			.bi_bdev = bio->bi_bdev,
			.bi_sector = bio->bi_sector,
			.bi_size = bio->bi_size,
			.bi_rw = bio->bi_rw,
		};

		/*
		 * merge_bvec_fn() returns number of bytes it can accept
		 * at this offset
		 */
		if (q->merge_bvec_fn(q, &bvm, bvec) < len) {
			bvec->bv_page = NULL;
			bvec->bv_len = 0;
			bvec->bv_offset = 0;
			return 0;
		}
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
		bio->bi_flags &= ~(1 << BIO_SEG_VALID);

	bio->bi_vcnt++;
	bio->bi_phys_segments++;
 done:
	bio->bi_size += len;
	return len;
}

/**
 *	bio_add_pc_page	-	attempt to add page to bio
 *	@q: the target queue
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 *	number of reasons, such as the bio being full or target block
 *	device limitations. The target block device must allow bio's
 *      smaller than PAGE_SIZE, so it is always possible to add a single
 *      page to an empty bio. This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
		    unsigned int len, unsigned int offset)
{
	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
}

/**
 *	bio_add_page	-	attempt to add page to bio
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 *	number of reasons, such as the bio being full or target block
 *	device limitations. The target block device must allow bio's
 *      smaller than PAGE_SIZE, so it is always possible to add a single
 *      page to an empty bio.
 */
int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
		 unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
}
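
/*
 * Usage sketch (illustrative): building a multi-page bio. Callers size
 * the allocation with bio_get_nr_vecs() and stop when bio_add_page()
 * returns less than the requested length, since it returns the number
 * of bytes actually added. The example_* names are hypothetical.
 *
 *	int i, nr = min(example_nr_pages, bio_get_nr_vecs(bdev));
 *	struct bio *bio = bio_alloc(GFP_NOIO, nr);
 *
 *	for (i = 0; bio && i < nr; i++)
 *		if (bio_add_page(bio, example_pages[i], PAGE_SIZE, 0)
 *		    < PAGE_SIZE)
 *			break;	the bio is full or a device limit was hit
 */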

struct bio_map_data {
	struct bio_vec *iovecs;
	struct sg_iovec *sgvecs;
	int nr_sgvecs;
	int is_our_pages;
};

static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
			     struct sg_iovec *iov, int iov_count,
			     int is_our_pages)
{
	memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
	memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
	bmd->nr_sgvecs = iov_count;
	bmd->is_our_pages = is_our_pages;
	bio->bi_private = bmd;
}

static void bio_free_map_data(struct bio_map_data *bmd)
{
	kfree(bmd->iovecs);
	kfree(bmd->sgvecs);
	kfree(bmd);
}

static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);

	if (!bmd)
		return NULL;

	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
	if (!bmd->iovecs) {
		kfree(bmd);
		return NULL;
	}

	bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
	if (bmd->sgvecs)
		return bmd;

	kfree(bmd->iovecs);
	kfree(bmd);
	return NULL;
}

static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
			  struct sg_iovec *iov, int iov_count, int uncopy,
			  int do_free_page)
{
	int ret = 0, i;
	struct bio_vec *bvec;
	int iov_idx = 0;
	unsigned int iov_off = 0;
	int read = bio_data_dir(bio) == READ;

	__bio_for_each_segment(bvec, bio, i, 0) {
		char *bv_addr = page_address(bvec->bv_page);
		unsigned int bv_len = iovecs[i].bv_len;

		while (bv_len && iov_idx < iov_count) {
			unsigned int bytes;
			char *iov_addr;

			bytes = min_t(unsigned int,
				      iov[iov_idx].iov_len - iov_off, bv_len);
			iov_addr = iov[iov_idx].iov_base + iov_off;

			if (!ret) {
				if (!read && !uncopy)
					ret = copy_from_user(bv_addr, iov_addr,
							     bytes);
				if (read && uncopy)
					ret = copy_to_user(iov_addr, bv_addr,
							   bytes);

				if (ret)
					ret = -EFAULT;
			}

			bv_len -= bytes;
			bv_addr += bytes;
			iov_addr += bytes;
			iov_off += bytes;

			if (iov[iov_idx].iov_len == iov_off) {
				iov_idx++;
				iov_off = 0;
			}
		}

		if (do_free_page)
			__free_page(bvec->bv_page);
	}

	return ret;
}

/**
 *	bio_uncopy_user	-	finish previously mapped bio
 *	@bio: bio being terminated
 *
 *	Free pages allocated from bio_copy_user() and write back data
 *	to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bio_flagged(bio, BIO_NULL_MAPPED))
		ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
				     bmd->nr_sgvecs, 1, bmd->is_our_pages);
	bio_free_map_data(bmd);
	bio_put(bio);
	return ret;
}

/**
 *	bio_copy_user_iov	-	copy user data to bio
 *	@q: destination block queue
 *	@map_data: pointer to the rq_map_data holding pages (if necessary)
 *	@iov:	the iovec.
 *	@iov_count: number of elements in the iovec
 *	@write_to_vm: bool indicating writing to pages or not
 *	@gfp_mask: memory allocation flags
 *
 *	Prepares and returns a bio for indirect user io, bouncing data
 *	to/from kernel pages as necessary. Must be paired with a call to
 *	bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      struct sg_iovec *iov, int iov_count,
			      int write_to_vm, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct bio_vec *bvec;
	struct page *page;
	struct bio *bio;
	int i, ret;
	int nr_pages = 0;
	unsigned int len = 0;
	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr;
		unsigned long end;
		unsigned long start;

		uaddr = (unsigned long)iov[i].iov_base;
		end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		start = uaddr >> PAGE_SHIFT;

		nr_pages += end - start;
		len += iov[i].iov_len;
	}

	bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	bio = bio_alloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;

	bio->bi_rw |= (!write_to_vm << BIO_RW);

	ret = 0;
	i = 0;
	while (len) {
		unsigned int bytes;

		if (map_data)
			bytes = 1U << (PAGE_SHIFT + map_data->page_order);
		else
			bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries) {
				ret = -ENOMEM;
				break;
			}
			page = map_data->pages[i++];
		} else
			page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page) {
			ret = -ENOMEM;
			break;
		}

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
	}

	if (ret)
		goto cleanup;

	/*
	 * success
	 */
	if (!write_to_vm) {
		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0);
		if (ret)
			goto cleanup;
	}

	bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
	return bio;
cleanup:
	if (!map_data)
		bio_for_each_segment(bvec, bio, i)
			__free_page(bvec->bv_page);

	bio_put(bio);
out_bmd:
	bio_free_map_data(bmd);
	return ERR_PTR(ret);
}

/**
 *	bio_copy_user	-	copy user data to bio
 *	@q: destination block queue
 *	@map_data: pointer to the rq_map_data holding pages (if necessary)
 *	@uaddr: start of user address
 *	@len: length in bytes
 *	@write_to_vm: bool indicating writing to pages or not
 *	@gfp_mask: memory allocation flags
 *
 *	Prepares and returns a bio for indirect user io, bouncing data
 *	to/from kernel pages as necessary. Must be paired with a call to
 *	bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
			  unsigned long uaddr, unsigned int len,
			  int write_to_vm, gfp_t gfp_mask)
{
	struct sg_iovec iov;

	iov.iov_base = (void __user *)uaddr;
	iov.iov_len = len;

	return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
}
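
/*
 * Usage sketch (illustrative): the copy/uncopy pairing for a bounced
 * user read. Error handling is elided and the example_* names are
 * hypothetical.
 *
 *	struct bio *bio = bio_copy_user(q, NULL, example_uaddr, example_len,
 *					1, GFP_KERNEL);
 *
 *	if (!IS_ERR(bio)) {
 *		... submit bio and wait for completion ...
 *		bio_uncopy_user(bio);	copies data back and frees the
 *					bounce pages
 *	}
 */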

static struct bio *__bio_map_user_iov(struct request_queue *q,
				      struct block_device *bdev,
				      struct sg_iovec *iov, int iov_count,
				      int write_to_vm, gfp_t gfp_mask)
{
	int i, j;
	int nr_pages = 0;
	struct page **pages;
	struct bio *bio;
	int cur_page = 0;
	int ret, offset;
	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;
		unsigned long len = iov[i].iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		nr_pages += end - start;
		/*
		 * buffer must be aligned to at least hardsector size for now
		 */
		if (uaddr & queue_dma_alignment(q))
			return ERR_PTR(-EINVAL);
	}

	if (!nr_pages)
		return ERR_PTR(-EINVAL);

	bio = bio_alloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
	if (!pages)
		goto out;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;
		unsigned long len = iov[i].iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;
		
		ret = get_user_pages_fast(uaddr, local_nr_pages,
				write_to_vm, &pages[cur_page]);
		if (ret < local_nr_pages) {
			ret = -EFAULT;
			goto out_unmap;
		}

		offset = uaddr & ~PAGE_MASK;
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;

			if (len <= 0)
				break;
			
			if (bytes > len)
				bytes = len;

			/*
			 * sorry...
			 */
			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
					    bytes)
				break;

			len -= bytes;
			offset = 0;
		}
		cur_page = j;
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < page_limit)
			page_cache_release(pages[j++]);
	}

	kfree(pages);

	/*
	 * set data direction, and check if mapped pages need bouncing
	 */
	if (!write_to_vm)
		bio->bi_rw |= (1 << BIO_RW);

	bio->bi_bdev = bdev;
	bio->bi_flags |= (1 << BIO_USER_MAPPED);
	return bio;

 out_unmap:
	for (i = 0; i < nr_pages; i++) {
		if (!pages[i])
			break;
		page_cache_release(pages[i]);
	}
 out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}

/**
 *	bio_map_user	-	map user address into bio
 *	@q: the struct request_queue for the bio
 *	@bdev: destination block device
 *	@uaddr: start of user address
 *	@len: length in bytes
 *	@write_to_vm: bool indicating writing to pages or not
 *	@gfp_mask: memory allocation flags
 *
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
			 unsigned long uaddr, unsigned int len, int write_to_vm,
			 gfp_t gfp_mask)
{
	struct sg_iovec iov;

	iov.iov_base = (void __user *)uaddr;
	iov.iov_len = len;

	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
}

/**
 *	bio_map_user_iov - map user sg_iovec table into bio
 *	@q: the struct request_queue for the bio
 *	@bdev: destination block device
 *	@iov:	the iovec.
 *	@iov_count: number of elements in the iovec
 *	@write_to_vm: bool indicating writing to pages or not
 *	@gfp_mask: memory allocation flags
 *
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
			     struct sg_iovec *iov, int iov_count,
			     int write_to_vm, gfp_t gfp_mask)
{
	struct bio *bio;

	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
				 gfp_mask);
	if (IS_ERR(bio))
		return bio;

	/*
	 * subtle -- if __bio_map_user() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);

	return bio;
}

static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/*
	 * make sure we dirty pages we wrote to
	 */
	__bio_for_each_segment(bvec, bio, i, 0) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		page_cache_release(bvec->bv_page);
	}

	bio_put(bio);
}

/**
 *	bio_unmap_user	-	unmap a bio
 *	@bio:		the bio being unmapped
 *
 *	Unmap a bio previously mapped by bio_map_user(). Must be called with
 *	a process context.
 *
 *	bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}
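
/*
 * Usage sketch (illustrative): the map/unmap pairing for direct user
 * I/O, as in SG_IO-style paths. Submission and waiting are elided; the
 * example_* names are hypothetical.
 *
 *	struct bio *bio = bio_map_user(q, example_bdev, example_uaddr,
 *				       example_len, 1, GFP_KERNEL);
 *
 *	if (!IS_ERR(bio)) {
 *		... submit bio and wait for completion ...
 *		bio_unmap_user(bio);	also drops the extra reference
 *					taken by bio_map_user_iov()
 *	}
 */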

static void bio_map_kern_endio(struct bio *bio, int err)
{
	bio_put(bio);
}


static struct bio *__bio_map_kern(struct request_queue *q, void *data,
				  unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_alloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
				    offset) < bytes)
			break;

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

/**
 *	bio_map_kern	-	map kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to map
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio allocation
 *
 *	Map the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	struct bio *bio;

	bio = __bio_map_kern(q, data, len, gfp_mask);
	if (IS_ERR(bio))
		return bio;

	if (bio->bi_size == len)
		return bio;

	/*
	 * Don't support partial mappings.
	 */
	bio_put(bio);
	return ERR_PTR(-EINVAL);
}
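
/*
 * Usage sketch (illustrative): mapping a kernel buffer for a driver
 * command. bio_map_kern() refuses partial mappings, so a single
 * IS_ERR() check is enough; its end_io handler already puts the bio.
 * The example_* names are hypothetical.
 *
 *	struct bio *bio = bio_map_kern(q, example_buf, example_len,
 *				       GFP_KERNEL);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	... submit bio ...
 */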

static void bio_copy_kern_endio(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	const int read = bio_data_dir(bio) == READ;
	struct bio_map_data *bmd = bio->bi_private;
	int i;
	char *p = bmd->sgvecs[0].iov_base;

	__bio_for_each_segment(bvec, bio, i, 0) {
		char *addr = page_address(bvec->bv_page);
		int len = bmd->iovecs[i].bv_len;

		if (read && !err)
			memcpy(p, addr, len);

		__free_page(bvec->bv_page);
		p += len;
	}

	bio_free_map_data(bmd);
	bio_put(bio);
}

/**
 *	bio_copy_kern	-	copy kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to copy
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio and page allocation
 *	@reading: data direction is READ
 *
 *	copy the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	struct bio *bio;
	struct bio_vec *bvec;
	int i;

	bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
	if (IS_ERR(bio))
		return bio;

	if (!reading) {
		void *p = data;

		bio_for_each_segment(bvec, bio, i) {
			char *addr = page_address(bvec->bv_page);

			memcpy(addr, p, bvec->bv_len);
			p += bvec->bv_len;
		}
	}

	bio->bi_end_io = bio_copy_kern_endio;

	return bio;
}

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.   If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, pdflush) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}

static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (page)
			put_page(page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one page_cache_release() against each page and will
 * run one bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);
static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int nr_clean_pages = 0;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			page_cache_release(page);
			bvec[i].bv_page = NULL;
		} else {
			nr_clean_pages++;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	} else {
		bio_put(bio);
	}
}

/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 * @error:	error, if any
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the
 *   preferred way to end I/O on a bio, it takes care of clearing
 *   BIO_UPTODATE on error. @error is 0 on success, and one of the
 *   established -Exxxx (-EIO, for instance) error values in case
 *   something went wrong. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io
 *   function.
 **/
void bio_endio(struct bio *bio, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;
	if (bio->bi_end_io)
		bio->bi_end_io(bio, error);
}
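
/*
 * Usage sketch (illustrative): a minimal bi_end_io handler. With the
 * current bio layer the handler is called once for the whole bio; this
 * sketch records @error, signals a waiter, and drops its reference.
 * Whether the handler or the submitter puts the bio is a per-driver
 * choice; the example_* names are hypothetical.
 *
 *	static void example_end_io(struct bio *bio, int error)
 *	{
 *		struct completion *done = bio->bi_private;
 *
 *		example_record_status(error);
 *		complete(done);
 *		bio_put(bio);
 *	}
 */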

void bio_pair_release(struct bio_pair *bp)
{
	if (atomic_dec_and_test(&bp->cnt)) {
		struct bio *master = bp->bio1.bi_private;

		bio_endio(master, bp->error);
		mempool_free(bp, bp->bio2.bi_private);
	}
}

static void bio_pair_end_1(struct bio *bi, int err)
{
	struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);

	if (err)
		bp->error = err;

	bio_pair_release(bp);
}

static void bio_pair_end_2(struct bio *bi, int err)
{
	struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);

	if (err)
		bp->error = err;

	bio_pair_release(bp);
}

/*
 * split a bio - only worry about a bio with a single page
 * in its iovec
 */
struct bio_pair *bio_split(struct bio *bi, int first_sectors)
{
	struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);

	if (!bp)
		return bp;

	trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
				bi->bi_sector + first_sectors);

	BUG_ON(bi->bi_vcnt != 1);
	BUG_ON(bi->bi_idx != 0);
	atomic_set(&bp->cnt, 3);
	bp->error = 0;
	bp->bio1 = *bi;
	bp->bio2 = *bi;
	bp->bio2.bi_sector += first_sectors;
	bp->bio2.bi_size -= first_sectors << 9;
	bp->bio1.bi_size = first_sectors << 9;

	bp->bv1 = bi->bi_io_vec[0];
	bp->bv2 = bi->bi_io_vec[0];
	bp->bv2.bv_offset += first_sectors << 9;
	bp->bv2.bv_len -= first_sectors << 9;
	bp->bv1.bv_len = first_sectors << 9;

	bp->bio1.bi_io_vec = &bp->bv1;
	bp->bio2.bi_io_vec = &bp->bv2;

	bp->bio1.bi_max_vecs = 1;
	bp->bio2.bi_max_vecs = 1;

	bp->bio1.bi_end_io = bio_pair_end_1;
	bp->bio2.bi_end_io = bio_pair_end_2;

	bp->bio1.bi_private = bi;
	bp->bio2.bi_private = bio_split_pool;

	if (bio_integrity(bi))
		bio_integrity_split(bi, bp, first_sectors);

	return bp;
}
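
/*
 * Usage sketch (illustrative): splitting a single-page bio that crosses
 * a boundary, e.g. in a striping driver. Both halves are submitted and
 * the original completes once all three references taken by bio_split()
 * have been dropped via bio_pair_release(). The example_* names are
 * hypothetical.
 *
 *	struct bio_pair *bp = bio_split(bio, example_first_sectors);
 *
 *	if (bp) {
 *		generic_make_request(&bp->bio1);
 *		generic_make_request(&bp->bio2);
 *		bio_pair_release(bp);
 *	}
 */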

/**
 *      bio_sector_offset - Find hardware sector offset in bio
 *      @bio:           bio to inspect
 *      @index:         bio_vec index
 *      @offset:        offset in bv_page
 *
 *      Return the number of hardware sectors between beginning of bio
 *      and an end point indicated by a bio_vec index and an offset
 *      within that vector's page.
 */
sector_t bio_sector_offset(struct bio *bio, unsigned short index,
			   unsigned int offset)
{
	unsigned int sector_sz = queue_hardsect_size(bio->bi_bdev->bd_disk->queue);
	struct bio_vec *bv;
	sector_t sectors;
	int i;

	sectors = 0;

	if (index >= bio->bi_idx)
		index = bio->bi_vcnt - 1;

	__bio_for_each_segment(bv, bio, i, 0) {
		if (i == index) {
			if (offset > bv->bv_offset)
				sectors += (offset - bv->bv_offset) / sector_sz;
			break;
		}

		sectors += bv->bv_len / sector_sz;
	}

	return sectors;
}
EXPORT_SYMBOL(bio_sector_offset);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
static int biovec_create_pools(struct bio_set *bs, int pool_entries)
{
	int i;

	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
		struct biovec_slab *bp = bvec_slabs + i;
		mempool_t **bvp = bs->bvec_pools + i;

		*bvp = mempool_create_slab_pool(pool_entries, bp->slab);
		if (!*bvp)
			return -ENOMEM;
	}
	return 0;
}

static void biovec_free_pools(struct bio_set *bs)
{
	int i;

	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
		mempool_t *bvp = bs->bvec_pools[i];

		if (bvp)
			mempool_destroy(bvp);
	}

}

void bioset_free(struct bio_set *bs)
{
	if (bs->bio_pool)
		mempool_destroy(bs->bio_pool);

	bioset_integrity_free(bs);
	biovec_free_pools(bs);

	kfree(bs);
}

struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
{
	struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);

	if (!bs)
		return NULL;

	bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
	if (!bs->bio_pool)
		goto bad;

	if (bioset_integrity_create(bs, bio_pool_size))
		goto bad;

	if (!biovec_create_pools(bs, bvec_pool_size))
		return bs;

bad:
	bioset_free(bs);
	return NULL;
}
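
/*
 * Usage sketch (illustrative): a driver that must make forward progress
 * under memory pressure keeps a private bio_set instead of sharing
 * fs_bio_set. The pool sizes below are arbitrary and the example_* names
 * are hypothetical.
 *
 *	struct bio_set *example_bs = bioset_create(BIO_POOL_SIZE, 2);
 *
 *	if (!example_bs)
 *		return -ENOMEM;
 *	...
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, example_bs);
 *	...
 *	bioset_free(example_bs);
 */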

static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
                                SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}

static int __init init_bio(void)
{
	bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	bio_integrity_init_slab();
	biovec_init_slabs();

	fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
	if (!fs_bio_set)
		panic("bio: can't allocate bios\n");

	bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
						     sizeof(struct bio_pair));
	if (!bio_split_pool)
		panic("bio: can't create split pool\n");

	return 0;
}

subsys_initcall(init_bio);

EXPORT_SYMBOL(bio_alloc);
EXPORT_SYMBOL(bio_kmalloc);
EXPORT_SYMBOL(bio_put);
EXPORT_SYMBOL(bio_free);
EXPORT_SYMBOL(bio_endio);
EXPORT_SYMBOL(bio_init);
EXPORT_SYMBOL(__bio_clone);
EXPORT_SYMBOL(bio_clone);
EXPORT_SYMBOL(bio_phys_segments);
EXPORT_SYMBOL(bio_add_page);
EXPORT_SYMBOL(bio_add_pc_page);
EXPORT_SYMBOL(bio_get_nr_vecs);
EXPORT_SYMBOL(bio_map_user);
EXPORT_SYMBOL(bio_unmap_user);
EXPORT_SYMBOL(bio_map_kern);
EXPORT_SYMBOL(bio_copy_kern);
EXPORT_SYMBOL(bio_pair_release);
EXPORT_SYMBOL(bio_split);
EXPORT_SYMBOL(bio_copy_user);
EXPORT_SYMBOL(bio_uncopy_user);
EXPORT_SYMBOL(bioset_create);
EXPORT_SYMBOL(bioset_free);
EXPORT_SYMBOL(bio_alloc_bioset);