/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/mempool.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/uio.h>

#define BIO_MAX_VECS		256U

static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
	return min(nr_segs, BIO_MAX_VECS);
}

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )

static inline void bio_advance_iter(const struct bio *bio,
				    struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}

/* @bytes should be less or equal to bvec[i->bi_idx].bv_len */
static inline void bio_advance_iter_single(const struct bio *bio,
					   struct bvec_iter *iter,
					   unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
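
/*
 * Example (illustrative sketch, not part of the kernel API): a driver that
 * owns @bio and wants to walk its data one single-page segment at a time
 * could do the following; handle_segment() is a hypothetical per-driver
 * helper.
 *
 *	struct bio_vec bvec;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bvec, bio, iter)
 *		handle_segment(bvec.bv_page, bvec.bv_offset, bvec.bv_len);
 */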

#define __bio_for_each_bvec(bvl, bio, iter, start)		\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)			\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)

/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
 * same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)		\
	for (i = 0, bvl = bio_first_bvec_all(bio);	\
	     i < (bio)->bi_vcnt; i++, bvl++)		\

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * Get a reference to a bio, so it won't disappear. The intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * Without the bio_get(), the I/O could complete before submit_bio returns,
 * and the bio would already be freed memory by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
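
/*
 * Illustrative sketch only (not from this header): a driver that can handle
 * at most @max_sectors per request could peel work off the front of @bio
 * until the remainder fits; issue_chunk() and d->bio_split are hypothetical
 * driver-local names.
 *
 *	struct bio *split;
 *
 *	do {
 *		split = bio_next_split(bio, max_sectors, GFP_NOIO,
 *				       &d->bio_split);
 *		if (split != bio)
 *			bio_chain(split, bio);
 *		issue_chunk(split);
 *	} while (split != bio);
 */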

enum {
	BIOSET_NEED_BVECS = BIT(0),
	BIOSET_NEED_RESCUER = BIT(1),
	BIOSET_PERCPU_CACHE = BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);

struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs,
		struct bio_set *bs);
struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
		struct bio_set *bs);
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
}
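
/*
 * Minimal usage sketch (assumptions: @bdev, @page and @sector come from the
 * caller; error handling omitted): allocate a bio from fs_bio_set, point it
 * at a device and a data page, and wait for the read to complete.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio_wait(bio);
 *	bio_put(bio);
 */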

extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio_set_flag(bio, BIO_QUIET);
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}

/*
 * Calculate number of bvec segments that should be allocated to fit data
 * pointed by @iter. If @iter is backed by bvec it's going to be reused
 * instead of allocating a new one.
 */
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
{
	if (iov_iter_is_bvec(iter))
		return 0;
	return iov_iter_npages(iter, max_segs);
}
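
/*
 * Illustrative sketch (variable names are hypothetical, error paths
 * omitted): a direct-I/O submitter sizing a bio for an iov_iter would
 * typically do:
 *
 *	unsigned int nr_vecs = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
 *	struct bio *bio = bio_alloc(GFP_KERNEL, nr_vecs);
 *	int ret = bio_iov_iter_get_pages(bio, iter);
 *
 * For a bvec-backed @iter, nr_vecs is 0 and bio_iov_iter_get_pages()
 * reuses the iterator's existing bvec array instead of allocating one.
 */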

struct request_queue;

extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *bio, struct bio_vec *table,
		     unsigned short max_vecs);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void guard_bio_eod(struct bio *bio);
void zero_fill_bio(struct bio *bio);

extern const char *bio_devname(struct bio *bio, char *buffer);

#define bio_set_dev(bio, bdev) 				\
do {							\
	bio_clear_flag(bio, BIO_REMAPPED);		\
	if ((bio)->bi_bdev != (bdev))			\
		bio_clear_flag(bio, BIO_THROTTLED);	\
	(bio)->bi_bdev = (bdev);			\
	bio_associate_blkg(bio);			\
} while (0)

#define bio_copy_dev(dst, src)			\
do {						\
	bio_clear_flag(dst, BIO_REMAPPED);		\
	(dst)->bi_bdev = (src)->bi_bdev;	\
	bio_clone_blkg_association(dst, src);	\
} while (0)

#define bio_dev(bio) \
	disk_devt((bio)->bi_bdev->bd_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
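
/*
 * Usage sketch (illustrative only): deferring bios for later submission,
 * the way a remapping driver's worker thread might. bio1, bio2 and
 * requeue_one() are hypothetical names.
 *
 *	struct bio_list deferred;
 *	struct bio *bio;
 *
 *	bio_list_init(&deferred);
 *	bio_list_add(&deferred, bio1);
 *	bio_list_add(&deferred, bio2);
 *
 *	while ((bio = bio_list_pop(&deferred)))
 *		requeue_one(bio);
 */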

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	/*
	 * per-cpu bio alloc cache
	 */
	struct bio_alloc_cache __percpu *cache;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	unsigned int back_pad;
	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;

	/*
	 * Hot un-plug notifier for the per-cpu cache, if used
	 */
	struct hlist_node cpuhp_dead;
};
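
/*
 * Illustrative sketch (my_bio_set and clone are hypothetical driver-local
 * names): a stacking driver typically embeds its own bio_set so that its
 * clones and splits never depend on the global fs_bio_set.
 *
 *	static struct bio_set my_bio_set;
 *
 *	init:		bioset_init(&my_bio_set, BIO_POOL_SIZE, 0,
 *				    BIOSET_NEED_BVECS);
 *	per request:	clone = bio_clone_fast(bio, GFP_NOIO, &my_bio_set);
 *	exit:		bioset_exit(&my_bio_set);
 */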

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp,
								unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different than IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_HIPRI;
	if (!is_sync_kiocb(kiocb))
		bio->bi_opf |= REQ_NOWAIT;
}
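
/*
 * Usage sketch (illustrative, not a prescribed sequence): a direct-I/O
 * submitter only marks the bio polled when the iocb actually asked for
 * polling, and keeps the returned cookie for a later blk_poll() call.
 *
 *	if (iocb->ki_flags & IOCB_HIPRI)
 *		bio_set_polled(bio, iocb);
 *	qc = submit_bio(bio);
 */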

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#endif /* __LINUX_BIO_H */