/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/mempool.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/uio.h>
#define BIO_MAX_VECS		256U

/* Clamp a requested segment count to the BIO_MAX_VECS hard limit. */
static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
	return min(nr_segs, BIO_MAX_VECS);
}
/* Get/set the I/O priority carried in the bio (bi_ioprio). */
#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)
/*
 * Accessors that resolve a bvec_iter position within a bio's bi_io_vec
 * into the current bio_vec / page / length / offset.
 */
#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

/* Shorthands for the bio's own current iterator position (bi_iter). */
#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)
/* Remaining size of an iterator / a bio, expressed in 512-byte sectors. */
#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)
/*
 * Return the data direction, READ or WRITE, derived from the request op.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

49 50 51 52 53 54 55
/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
A
Adrian Hunter 已提交
56
	    bio_op(bio) != REQ_OP_DISCARD &&
57 58
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
59 60 61 62 63
		return true;

	return false;
}

64
static inline bool bio_no_advance_iter(const struct bio *bio)
65
{
A
Adrian Hunter 已提交
66 67
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
68 69
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
70 71
}

72 73
static inline void *bio_data(struct bio *bio)
{
74
	if (bio_has_data(bio))
75 76 77 78
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}
/*
 * Step @iter to the next single-page segment of @bio; returns false once
 * all bi_vcnt vectors have been consumed.
 */
static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx < bio->bi_vcnt) {
		bvec_advance(&bio->bi_io_vec[iter->idx], iter);
		return true;
	}

	return false;
}
/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )

97 98
static inline void bio_advance_iter(const struct bio *bio,
				    struct bvec_iter *iter, unsigned int bytes)
K
Kent Overstreet 已提交
99 100 101
{
	iter->bi_sector += bytes >> 9;

M
Ming Lei 已提交
102
	if (bio_no_advance_iter(bio))
K
Kent Overstreet 已提交
103
		iter->bi_size -= bytes;
M
Ming Lei 已提交
104
	else
K
Kent Overstreet 已提交
105
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
106
		/* TODO: It is reasonable to complete bio with error here. */
D
Dmitry Monakhov 已提交
107 108
}

/* @bytes should be less or equal to bvec[i->bi_idx].bv_len */
static inline void bio_advance_iter_single(const struct bio *bio,
					   struct bvec_iter *iter,
					   unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (!bio_no_advance_iter(bio))
		bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
	else
		iter->bi_size -= bytes;
}

void __bio_advance(struct bio *, unsigned bytes);

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @nbytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
static inline void bio_advance(struct bio *bio, unsigned int nbytes)
{
	/* Fast path: consuming the whole bio needs no bvec walking. */
	if (nbytes == bio->bi_iter.bi_size) {
		bio->bi_iter.bi_size = 0;
		return;
	}
	__bio_advance(bio, nbytes);
}

/* Iterate single-page segments starting from iterator position @start. */
#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define __bio_for_each_bvec(bvl, bio, iter, start)		\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)			\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)

/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
 * same reasons as bio_for_each_segment_all().
 * (The stray trailing backslash after the for-header has been dropped so the
 * macro no longer swallows the following source line.)
 */
#define bio_for_each_bvec_all(bvl, bio, i)		\
	for (i = 0, bvl = bio_first_bvec_all(bio);	\
	     i < (bio)->bi_vcnt; i++, bvl++)

/* True when @bvec is the last segment the iterator will yield. */
#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
static inline unsigned bio_segments(struct bio *bio)
174 175 176 177 178
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

179
	/*
180 181
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
182 183
	 */

184 185 186 187
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
188 189
		return 0;
	case REQ_OP_WRITE_SAME:
190
		return 1;
191 192 193
	default:
		break;
	}
194

195
	bio_for_each_segment(bv, bio, iter)
196 197 198 199 200
		segs++;

	return segs;
}

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), it could potentially complete I/O before submit_bio
 * returns. and then bio would be freed memory when if (bio->bi_flags ...)
 * runs
 */
215 216 217 218 219 220 221 222 223 224 225
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

/* Set the bio's refcount; any count other than 1 marks it BIO_REFFED. */
static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		/* Flag update must be visible before the count is stored. */
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}

231 232
static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
233
	return (bio->bi_flags & (1U << bit)) != 0;
234 235 236 237
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
238
	bio->bi_flags |= (1U << bit);
239 240 241 242
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
243
	bio->bi_flags &= ~(1U << bit);
244 245
}

246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262
static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

271 272 273 274 275 276
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

277
	struct bvec_iter	bip_iter;
278 279

	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
280
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
281
	unsigned short		bip_flags;	/* control flags */
282

M
Ming Lei 已提交
283 284
	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

285
	struct work_struct	bip_work;	/* I/O completion */
286 287

	struct bio_vec		*bip_vec;
288
	struct bio_vec		bip_inline_vecs[];/* embedded bvec array */
289
};
#if defined(CONFIG_BLK_DEV_INTEGRITY)

/* Integrity payload of @bio, or NULL when REQ_INTEGRITY is not set. */
static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

301 302 303 304 305 306 307 308 309
static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}
310

311 312 313 314 315 316 317 318 319 320 321
static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */
void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}

347 348
enum {
	BIOSET_NEED_BVECS = BIT(0),
349
	BIOSET_NEED_RESCUER = BIT(1),
350
	BIOSET_PERCPU_CACHE = BIT(2),
351
};
K
Kent Overstreet 已提交
352 353
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
354
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
355
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
L
Linus Torvalds 已提交
356

357 358
struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs,
		struct bio_set *bs);
359 360
struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
		struct bio_set *bs);
361
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);

extern struct bio_set fs_bio_set;

369
static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs)
370
{
371
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
372 373
}

374
void submit_bio(struct bio *bio);
375

376 377 378 379
extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
380
	bio->bi_status = BLK_STS_IOERR;
381 382 383
	bio_endio(bio);
}

384 385
static inline void bio_wouldblock_error(struct bio *bio)
{
386
	bio_set_flag(bio, BIO_QUIET);
387
	bio->bi_status = BLK_STS_AGAIN;
388 389 390
	bio_endio(bio);
}

/*
 * Calculate number of bvec segments that should be allocated to fit data
 * pointed by @iter. If @iter is backed by bvec it's going to be reused
 * instead of allocating a new one.
 */
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
{
	if (iov_iter_is_bvec(iter))
		return 0;
	return iov_iter_npages(iter, max_segs);
}

struct request_queue;

405
extern int submit_bio_wait(struct bio *bio);
406 407
extern void bio_init(struct bio *bio, struct bio_vec *table,
		     unsigned short max_vecs);
408
extern void bio_uninit(struct bio *);
K
Kent Overstreet 已提交
409
extern void bio_reset(struct bio *);
K
Kent Overstreet 已提交
410
void bio_chain(struct bio *, struct bio *);
L
Linus Torvalds 已提交
411 412

extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
413 414
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
415 416
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset);
417 418
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
419
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
420
void bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void guard_bio_eod(struct bio *bio);
void zero_fill_bio(struct bio *bio);

extern const char *bio_devname(struct bio *bio, char *buffer);

/* dev_t of the block device this bio targets. */
#define bio_dev(bio) \
	disk_devt((bio)->bi_bdev->bd_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465
static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio_clear_flag(bio, BIO_REMAPPED);
	if (bio->bi_bdev != bdev)
		bio_clear_flag(bio, BIO_THROTTLED);
	bio->bi_bdev = bdev;
	bio_associate_blkg(bio);
}

static inline void bio_copy_dev(struct bio *dst, struct bio *src)
{
	bio_clear_flag(dst, BIO_REMAPPED);
	dst->bi_bdev = src->bi_bdev;
	bio_clone_blkg_association(dst, src);
}

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

/* Static initializer for an empty bio_list. */
#define BIO_EMPTY_LIST	{ NULL, NULL }

/* Iterate each bio chained on the list via bi_next. */
#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

/* Append @bio to the tail of @bl. */
static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (!bl->tail)
		bl->head = bio;
	else
		bl->tail->bi_next = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

/* Return the first bio on the list without removing it (NULL if empty). */
static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581
static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

/* Take the whole chain off @bl, leaving it empty; returns the old head. */
static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *head = bl->head;

	bl->head = NULL;
	bl->tail = NULL;

	return head;
}

582 583 584 585 586 587 588 589 590 591 592
/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

605 606 607 608 609
	/*
	 * per-cpu bio alloc cache
	 */
	struct bio_alloc_cache __percpu *cache;

610 611
	mempool_t bio_pool;
	mempool_t bvec_pool;
K
Kent Overstreet 已提交
612
#if defined(CONFIG_BLK_DEV_INTEGRITY)
613 614
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
K
Kent Overstreet 已提交
615
#endif
616

617
	unsigned int back_pad;
618 619 620 621 622 623 624 625
	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
626 627 628 629 630

	/*
	 * Hot un-plug notifier for the per-cpu cache, if used
	 */
	struct hlist_node cpuhp_dead;
K
Kent Overstreet 已提交
631 632
};

633 634 635 636 637
static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}

#if defined(CONFIG_BLK_DEV_INTEGRITY)

/* Iterate the integrity payload's bvecs. */
#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

647 648
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
649
extern bool bio_integrity_prep(struct bio *);
650
extern void bio_integrity_advance(struct bio *, unsigned int);
651
extern void bio_integrity_trim(struct bio *);
652
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
653 654 655
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);
656 657 658

#else /* CONFIG_BLK_DEV_INTEGRITY */

659
static inline void *bio_integrity(struct bio *bio)
660
{
661
	return NULL;
662 663 664 665 666 667 668 669 670 671 672 673
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free (struct bio_set *bs)
{
	return;
}

674
static inline bool bio_integrity_prep(struct bio *bio)
675
{
676
	return true;
677 678
}

679
static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
680
				      gfp_t gfp_mask)
681 682 683
{
	return 0;
}
static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
}

static inline void bio_integrity_trim(struct bio *bio)
{
}

static inline void bio_integrity_init(void)
{
}
700

701 702 703 704 705
static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

706 707 708 709 710 711 712 713 714 715 716 717
static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp,
								unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					unsigned int len, unsigned int offset)
{
	return 0;
}

718 719
#endif /* CONFIG_BLK_DEV_INTEGRITY */

J
Jens Axboe 已提交
720 721 722 723 724 725 726 727 728
/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different than IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
729
	bio->bi_opf |= REQ_POLLED;
J
Jens Axboe 已提交
730 731 732 733
	if (!is_sync_kiocb(kiocb))
		bio->bi_opf |= REQ_NOWAIT;
}

734 735
struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

L
Linus Torvalds 已提交
736
#endif /* __LINUX_BIO_H */