/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *blk_requestq_srcu_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);

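/*
 * Non-blocking attempt to grab a q_usage_counter reference.  Fails if the
 * queue is frozen, or if it is marked pm-only and the caller is not the
 * runtime-PM path (or the device is runtime suspended).
 */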
static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

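/*
 * Take a queue reference for @bio: the non-blocking fast path above, with
 * __bio_queue_enter() as the blocking slow path.
 */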
static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}

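/*
 * Passthrough, flush, write-zeroes and zone-append requests are never
 * merged, nor is anything carrying a no-merge flag.
 */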
static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends the bios to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, the request is handled as a normal read/write request and
 *     the ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

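/* Upper bound, in 512-byte sectors, on a single request of the given op. */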
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     enum req_op op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
const char *blk_status_to_str(blk_status_t status);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

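/*
 * Quick check for whether @bio may have to be split to fit the queue
 * limits; the expensive decision is left to __bio_split_to_limits().
 */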
static inline bool bio_may_exceed_limits(struct bio *bio,
		struct queue_limits *lim)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data.  The check might
	 * lead to occasional false negatives when bios are cloned, but compared
	 * to the performance impact of cloned bios themselves the loop below
	 * doesn't matter anyway.
	 */
	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}

struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
		       unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

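/* Disallow further merging into @req and forget it as the last-merge hint. */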
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline struct bio *blk_queue_bounce(struct bio *bio,
		struct request_queue *q)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
		return __blk_queue_bounce(bio, q);
	return bio;
}

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

#ifdef CONFIG_BLK_DEV_ZONED
void disk_free_zone_bitmaps(struct gendisk *disk);
void disk_clear_zone_settings(struct gendisk *disk);
#else
static inline void disk_free_zone_bitmaps(struct gendisk *disk) {}
static inline void disk_clear_zone_settings(struct gendisk *disk) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void blk_drop_partitions(struct gendisk *disk);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

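/*
 * Queues that need SRCU protection (blocking ->queue_rq) are allocated from
 * a larger cache that has room for the embedded srcu_struct.
 */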
static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
{
	if (srcu)
		return blk_requestq_srcu_cachep;
	return blk_requestq_cachep;
}
struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);

int disk_scan_partitions(struct gendisk *disk, fmode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

#endif /* BLK_INTERNAL_H */