/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *blk_requestq_srcu_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}
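
/*
 * Usage sketch (illustrative, not a definition in this header): a caller
 * that enters the queue by hand must drop the reference again with
 * blk_queue_exit(), roughly as the bio-based submission path does:
 *
 *	if (bio_queue_enter(bio) == 0) {
 *		disk->fops->submit_bio(bio);
 *		blk_queue_exit(disk->queue);
 *	}
 */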

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
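
/*
 * Worked example (numbers assumed, not from any real device): with a 64K
 * segment boundary (mask == 0xffff), a 512-byte vec at physical address
 * 0x1fe00 ends exactly where a second 512-byte vec at 0x20000 begins, so
 * the contiguity check passes; but 0x1fe00 | 0xffff == 0x1ffff while
 * (0x20000 + 0x200 - 1) | 0xffff == 0x2ffff, so the pair straddles a
 * segment boundary and is not merged.
 */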

static inline bool __bvec_gap_to_prev(struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}
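
/*
 * Worked example (numbers assumed): with virt_boundary_mask == 0xfff, a
 * previous vec with bv_offset 0 and bv_len 0x800 ends in the middle of a
 * 4K window, so (0x0 + 0x800) & 0xfff != 0 and any following vec creates a
 * gap; if the previous vec instead ends on a 4K boundary and the next one
 * starts at offset 0, both terms are zero and no gap is reported.
 */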

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends the bios to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, the request will be normal read/write requests.  The ranges
 *     need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     enum req_op op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}
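
/*
 * Example (illustrative): the clamp above exists because the byte count of
 * a request is carried in an unsigned int.  A device advertising
 * max_discard_sectors == 1 << 24 (8 GiB) is still reported here as
 * UINT_MAX >> SECTOR_SHIFT == 8388607 sectors, i.e. just under 4 GiB.
 */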

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
const char *blk_status_to_str(blk_status_t status);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)
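
/*
 * Rough sketch of how the plugging code uses these (see blk-mq.c; this is a
 * summary, not the authoritative policy): requests accumulate in a task's
 * plug until about BLK_MAX_REQUEST_COUNT of them are queued, or until the
 * most recently added request already spans BLK_PLUG_FLUSH_SIZE bytes,
 * after which the plug is flushed to the driver.
 */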

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

static inline bool bio_may_exceed_limits(struct bio *bio,
		struct queue_limits *lim)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data.  The check might
	 * lead to occasional false negatives when bios are cloned, but compared
	 * to the performance impact of cloned bios themselves the segment walk
	 * in the actual split code doesn't matter anyway.
	 */
	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}
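
/*
 * Example (illustrative): a bio with a single 1024-byte vec at offset 0 on
 * a queue with chunk_sectors == 0 returns false here and skips the split
 * path entirely; the same bio on a chunked (e.g. zoned) queue, or any bio
 * whose first vec spills past PAGE_SIZE, still goes through
 * __bio_split_to_limits().
 */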

struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
		       unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline struct bio *blk_queue_bounce(struct bio *bio,
		struct request_queue *q)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
		return __blk_queue_bounce(bio, q);
	return bio;
}

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
int blk_iolatency_init(struct gendisk *disk);
#else
static inline int blk_iolatency_init(struct gendisk *disk) { return 0; }
#endif

#ifdef CONFIG_BLK_DEV_ZONED
void disk_free_zone_bitmaps(struct gendisk *disk);
void disk_clear_zone_settings(struct gendisk *disk);
#else
static inline void disk_free_zone_bitmaps(struct gendisk *disk) {}
static inline void disk_clear_zone_settings(struct gendisk *disk) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void blk_drop_partitions(struct gendisk *disk);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
{
	if (srcu)
		return blk_requestq_srcu_cachep;
	return blk_requestq_cachep;
}
struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);

int disk_scan_partitions(struct gendisk *disk, fmode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
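/*
 * The unsigned comparison above is a compact range test: casting the
 * counter to unsigned int and adding 127u makes the expression true
 * exactly for values in [-127, 0], i.e. for a zero refcount and for
 * slightly underflowed (negative) ones.
 */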

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}
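
/*
 * Usage sketch (illustrative, simplified from the freeing and timeout
 * paths): the last reference dropped is responsible for actually freeing
 * the request, roughly:
 *
 *	if (req_ref_put_and_test(rq))
 *		__blk_mq_free_request(rq);
 *
 * while code that may race with completion, such as timeout handling, bumps
 * the count with req_ref_inc_not_zero() before touching the request.
 */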

#endif /* BLK_INTERNAL_H */