/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/uio.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)
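
/*
 * Example (illustrative sketch, not part of this header): bounds-checking
 * a bio against the device capacity before processing it:
 *
 *	if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk))
 *		// bio extends past the end of the device
 */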

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

/**
 * bio_full - check if the bio is full
 * @bio:	bio to check
 * @len:	length of one segment to be added
 *
 * Return true if @bio is full and one segment with @len bytes can't be
 * added to the bio, otherwise return false
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;

	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;

	return false;
}

static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )

static inline void bio_advance_iter(const struct bio *bio,
				    struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}

/* @bytes should be less than or equal to bvec[i->bi_idx].bv_len */
static inline void bio_advance_iter_single(const struct bio *bio,
					   struct bvec_iter *iter,
					   unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
170 171 172 173

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
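
/*
 * Example (illustrative sketch): summing the payload of a bio one
 * single-page segment at a time; @bvl is a bio_vec copy, @iter the
 * iterator state:
 *
 *	struct bio_vec bvl;
 *	struct bvec_iter iter;
 *	unsigned bytes = 0;
 *
 *	bio_for_each_segment(bvl, bio, iter)
 *		bytes += bvl.bv_len;
 */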

#define __bio_for_each_bvec(bvl, bio, iter, start)		\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)			\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)
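
/*
 * Example (illustrative sketch): unlike bio_for_each_segment(),
 * bio_for_each_bvec() yields multi-page bvecs, so physically contiguous
 * pages are seen as a single entry:
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_bvec(bv, bio, iter)
 *		nr_bvecs++;
 */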

/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
 * same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)		\
	for (i = 0, bvl = bio_first_bvec_all(bio);	\
	     i < (bio)->bi_vcnt; i++, bvl++)		\

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * Get a reference to a bio, so it won't disappear. The intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * Without the bio_get(), the I/O could complete before submit_bio()
 * returns, and the bio would already be freed by the time the
 * if (bio->bi_flags ...) test runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = bio_iovec(bio);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	if (unlikely(!bio_multiple_segments(bio))) {
		*bv = bio_iovec(bio);
		return;
	}

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
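
/*
 * Example (illustrative sketch): carving off as much of @bio as the
 * target can take; if a split happened, @bio keeps the tail:
 *
 *	struct bio *n = bio_next_split(bio, max_sectors, GFP_NOIO, bs);
 *
 *	if (n != bio) {
 *		// @bio still holds the tail; chain or resubmit it
 *	}
 *	// process @n, which spans at most max_sectors
 */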

enum {
	BIOSET_NEED_BVECS = BIT(0),
	BIOSET_NEED_RESCUER = BIT(1),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
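
/*
 * Example (illustrative sketch, hypothetical names): a driver-private
 * bio_set sized for a small number of concurrent bios:
 *
 *	static struct bio_set my_bioset;
 *
 *	ret = bioset_init(&my_bioset, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 *	...
 *	bioset_exit(&my_bioset);
 */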

struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs,
		struct bio_set *bs);
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}
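
/*
 * Example (illustrative sketch, hypothetical names): allocating a
 * single-segment read bio from fs_bio_set and submitting it; error
 * handling is omitted:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */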

extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio_set_flag(bio, BIO_QUIET);
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}

/*
 * Calculate the number of bvec segments that should be allocated to fit
 * the data pointed to by @iter. If @iter is backed by a bvec array, it is
 * reused instead of allocating a new one.
 */
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
{
	if (iov_iter_is_bvec(iter))
		return 0;
	return iov_iter_npages(iter, max_segs);
}

struct request_queue;

extern int submit_bio_wait(struct bio *bio);
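
/*
 * Example (illustrative sketch): synchronous submission; the caller
 * sleeps until the bio completes and bi_status is folded into an errno:
 *
 *	int ret = submit_bio_wait(bio);
 *
 *	if (ret)
 *		// the I/O failed
 */
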
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
		     unsigned short max_vecs);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);
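
/*
 * Example (illustrative sketch): after bio_chain(), the parent will not
 * complete until the chained child has completed too:
 *
 *	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);
 *
 *	bio_chain(split, bio);
 *	submit_bio(split);
 *	// submit or continue processing @bio; it finishes after @split
 */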

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset);
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
void bio_truncate(struct bio *bio, unsigned new_size);
void guard_bio_eod(struct bio *bio);

static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}

extern const char *bio_devname(struct bio *bio, char *buffer);

#define bio_set_dev(bio, bdev)				\
do {							\
	bio_clear_flag(bio, BIO_REMAPPED);		\
	if ((bio)->bi_bdev != (bdev))			\
		bio_clear_flag(bio, BIO_THROTTLED);	\
	(bio)->bi_bdev = (bdev);			\
	bio_associate_blkg(bio);			\
} while (0)

#define bio_copy_dev(dst, src)			\
do {						\
	bio_clear_flag(dst, BIO_REMAPPED);	\
	(dst)->bi_bdev = (src)->bi_bdev;	\
	bio_clone_blkg_association(dst, src);	\
} while (0)

#define bio_dev(bio) \
	disk_devt((bio)->bi_bdev->bd_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif
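
/*
 * Example (illustrative sketch): copying a segment out of a bio through a
 * temporary atomic mapping; as noted above, do not re-enable interrupts
 * between the map and the unmap:
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(&bvec, &flags);
 *
 *	memcpy(dst, buf, bvec.bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */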

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
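
/*
 * Example (illustrative sketch): the usual deferred-submission pattern,
 * queueing bios and draining them later, e.g. from a worker:
 *
 *	struct bio_list list = BIO_EMPTY_LIST;
 *	struct bio *bio;
 *
 *	bio_list_add(&list, bio);		// producer side
 *	...
 *	while ((bio = bio_list_pop(&list)))	// consumer side
 *		submit_bio(bio);
 */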

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	unsigned int back_pad;
	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
								unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different than IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_HIPRI;
	if (!is_sync_kiocb(kiocb))
		bio->bi_opf |= REQ_NOWAIT;
}

#endif /* __LINUX_BIO_H */