/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>
#include <linux/uuid.h>
#include <linux/xarray.h>

struct module;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct blkcg_gq;
struct blk_flush_queue;
struct kiocb;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern struct device_type part_type;
extern struct class block_class;

/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16

/* Doing classic polling */
#define BLK_MQ_POLL_CLASSIC -1

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS			256
#define DISK_NAME_LEN			32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus NULL.
 * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media.  When set, the device remains present even when media is not
 * inserted.  Shall not be set for devices which are removed entirely when the
 * media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled.  The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 *
 */
enum {
	GENHD_FL_REMOVABLE			= 1 << 0,
	GENHD_FL_HIDDEN				= 1 << 1,
	GENHD_FL_NO_PART			= 1 << 2,
};

enum {
	DISK_EVENT_MEDIA_CHANGE			= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST		= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

struct blk_integrity {
	const struct blk_integrity_profile	*profile;
	unsigned char				flags;
	unsigned char				tuple_size;
	unsigned char				interval_exp;
	unsigned char				tag_size;
};

struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver, the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info	*bdi;
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;
#ifdef  CONFIG_BLK_DEV_INTEGRITY
	struct kobject integrity_kobj;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */
#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;
};

static inline bool disk_live(struct gendisk *disk)
{
	return !inode_unhashed(disk->part0->bd_inode);
}

/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}

static inline bool blk_op_is_passthrough(unsigned int op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

/*
 * Zoned block device models (zoned limit).
 *
 * Note: This needs to be ordered from the least to the most severe
 * restrictions for the inheritance in blk_stack_limits() to work.
 */
enum blk_zoned_model {
	BLK_ZONED_NONE = 0,	/* Regular block device */
	BLK_ZONED_HA,		/* Host-aware zoned block device */
	BLK_ZONED_HM,		/* Host-managed zoned block device */
};

/*
 * BLK_BOUNCE_NONE:	never bounce (default)
 * BLK_BOUNCE_HIGH:	bounce all highmem pages
 */
enum blk_bounce {
	BLK_BOUNCE_NONE,
	BLK_BOUNCE_HIGH,
};

struct queue_limits {
	enum blk_bounce		bounce;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		logical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;
	unsigned int		zone_write_granularity;

	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		raid_partial_stripes_expensive;
	enum blk_zoned_model	zoned;
};

typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model);

#ifdef CONFIG_BLK_DEV_ZONED

#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
unsigned int blkdev_nr_zones(struct gendisk *disk);
extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
			    sector_t sectors, sector_t nr_sectors,
			    gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk));

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
				  unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blkdev_nr_zones(struct gendisk *disk)
{
	return 0;
}

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
					 fmode_t mode, unsigned int cmd,
					 unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */
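
/*
 * Illustrative sketch (not part of the API declared above): counting the
 * sequential zones of a zoned block device with blkdev_report_zones().
 * The callback follows the report_zones_cb signature; "bdev" is assumed to
 * be an already opened zoned block device.
 *
 *	static int count_seq_zones_cb(struct blk_zone *zone, unsigned int idx,
 *				      void *data)
 *	{
 *		unsigned int *nr_seq = data;
 *
 *		if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
 *			(*nr_seq)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr_seq = 0;
 *	int ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				      count_seq_zones_cb, &nr_seq);
 *
 * A negative ret is an error; otherwise ret is the number of zones reported.
 */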

/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not overlap
 * and must include all sectors within the disk capacity (no sector holes
 * allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
	struct kobject		kobj;
	struct request_queue	*queue;
	sector_t		sector;
	sector_t		nr_sectors;
};

struct blk_independent_access_ranges {
	struct kobject				kobj;
	bool					sysfs_registered;
	unsigned int				nr_ia_ranges;
	struct blk_independent_access_range	ia_range[];
};

struct request_queue {
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	struct percpu_ref	q_usage_counter;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;

	unsigned int		queue_depth;

	/* hw dispatch queues */
	struct xarray		hctx_table;
	unsigned int		nr_hw_queues;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;
	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t		pm_only;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	spinlock_t		queue_lock;

	struct gendisk		*disk;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject *mq_kobj;

#ifdef  CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	enum rpm_status		rpm_status;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */

	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject *crypto_kobject;
#endif

	unsigned int		rq_timeout;
	int			poll_nsec;

	struct blk_stat_callback	*poll_cb;
	struct blk_rq_stat	*poll_stat;

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	atomic_t		nr_active_requests_shared_tags;

	struct blk_mq_tags	*sched_shared_tags;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	unsigned int		required_elevator_features;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information for request dispatch control.
	 * nr_zones is the total number of zones of the device. This is always
	 * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
	 * bits which indicates if a zone is conventional (bit set) or
	 * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
	 * bits which indicates if a zone is write locked, that is, if a write
	 * request targeting the zone was dispatched. All three fields are
	 * initialized by the low level device driver (e.g. scsi/sd.c).
	 * Stacking drivers (device mappers) may or may not initialize
	 * these fields.
	 *
	 * Reads of this information must be protected with blk_queue_enter() /
	 * blk_queue_exit(). Modifying this information is only allowed while
	 * no requests are being processed. See also blk_mq_freeze_queue() and
	 * blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned long		*conv_zones_bitmap;
	unsigned long		*seq_zones_wlock;
	unsigned int		max_open_zones;
	unsigned int		max_active_zones;
#endif /* CONFIG_BLK_DEV_ZONED */

	int			node;
	struct mutex		debugfs_mutex;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;
	struct mutex		sysfs_dir_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head	unused_hctx_list;
	spinlock_t		unused_hctx_lock;

	int			mq_freeze_depth;

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex		mq_freeze_lock;

	int			quiesce_depth;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		bio_split;

	struct dentry		*debugfs_dir;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry		*sched_debugfs_dir;
	struct dentry		*rqos_debugfs_dir;
#endif

	bool			mq_sysfs_init_done;

#define BLK_MAX_WRITE_HINTS	5
	u64			write_hints[BLK_MAX_WRITE_HINTS];

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;

	/**
	 * @srcu: Sleepable RCU. Use as lock when type of the request queue
	 * is blocking (BLK_MQ_F_BLOCKING). Must be the last member
	 */
	struct srcu_struct	srcu[];
};

/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
#define QUEUE_FLAG_DYING	1	/* queue being torn down */
#define QUEUE_FLAG_HAS_SRCU	2	/* SRCU is allocated */
#define QUEUE_FLAG_NOMERGES     3	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
#define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
#define QUEUE_FLAG_DISCARD	8	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	11	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		13	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		17	/* Write back caching */
#define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
#define QUEUE_FLAG_DAX		19	/* device supports DAX */
#define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
#define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA	25	/* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26	/* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT       29	/* device supports NOWAIT */

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_SAME_COMP) |		\
				 (1 << QUEUE_FLAG_NOWAIT))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);

#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_has_srcu(q)	test_bit(QUEUE_FLAG_HAS_SRCU, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_stable_writes(q) \
	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_zone_resetall(q)	\
	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q)	\
	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_fua(q)	test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_nowait(q)	test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
		return q->limits.zoned;
	return BLK_ZONED_NONE;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->nr_zones : 0;
}

static inline unsigned int blk_queue_zone_no(struct request_queue *q,
					     sector_t sector)
{
	if (!blk_queue_is_zoned(q))
		return 0;
	return sector >> ilog2(q->limits.chunk_sectors);
}

static inline bool blk_queue_zone_is_seq(struct request_queue *q,
					 sector_t sector)
{
	if (!blk_queue_is_zoned(q))
		return false;
	if (!q->conv_zones_bitmap)
		return true;
	return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
}

static inline void blk_queue_max_open_zones(struct request_queue *q,
		unsigned int max_open_zones)
{
	q->max_open_zones = max_open_zones;
}

static inline unsigned int queue_max_open_zones(const struct request_queue *q)
{
	return q->max_open_zones;
}

static inline void blk_queue_max_active_zones(struct request_queue *q,
		unsigned int max_active_zones)
{
	q->max_active_zones = max_active_zones;
}

static inline unsigned int queue_max_active_zones(const struct request_queue *q)
{
	return q->max_active_zones;
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
	return 0;
}
static inline bool blk_queue_zone_is_seq(struct request_queue *q,
					 sector_t sector)
{
	return false;
}
static inline unsigned int blk_queue_zone_no(struct request_queue *q,
					     sector_t sector)
{
	return 0;
}
static inline unsigned int queue_max_open_zones(const struct request_queue *q)
{
	return 0;
}
static inline unsigned int queue_max_active_zones(const struct request_queue *q)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline int get_disk_ro(struct gendisk *disk)
{
	return disk->part0->bd_read_only ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline int bdev_read_only(struct block_device *bdev)
{
	return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
bool disk_force_media_change(struct gendisk *disk, unsigned int events);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);
void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(node_id)						\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_alloc_disk(node_id, &__key);				\
})
void blk_cleanup_disk(struct gendisk *disk);
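
/*
 * Illustrative sketch (error handling abbreviated; "my_fops" and "my_data"
 * are driver-private placeholders): a typical bio based driver allocates
 * the gendisk, fills in the mandatory fields and then registers it.
 *
 *	struct gendisk *disk = blk_alloc_disk(NUMA_NO_NODE);
 *
 *	if (!disk)
 *		return -ENOMEM;
 *	disk->fops = &my_fops;
 *	disk->private_data = my_data;
 *	snprintf(disk->disk_name, DISK_NAME_LEN, "myblk%d", 0);
 *	set_capacity(disk, nr_sectors);
 *	ret = add_disk(disk);
 *	if (ret)
 *		blk_cleanup_disk(disk);
 */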

int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

bool bdev_check_media_change(struct block_device *bdev);
int __invalidate_device(struct block_device *bdev, bool kill_dirty);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
int bd_register_pending_holders(struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
static inline int bd_register_pending_holders(struct gendisk *disk)
{
	return 0;
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
dev_t blk_lookup_devt(const char *name, int partno);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern void blk_queue_split(struct bio **);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);

/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(unsigned int op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);

/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT		(1 << 0)
/* do not sleep to wait for the expected completion time */
#define BLK_POLL_NOSLEEP		(1 << 1)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;	/* this is never NULL */
}

#ifdef CONFIG_BLK_DEV_ZONED

/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
	return blk_queue_zone_no(bdev_get_queue(bio->bi_bdev),
				 bio->bi_iter.bi_sector);
}

static inline unsigned int bio_zone_is_seq(struct bio *bio)
{
	return blk_queue_zone_is_seq(bdev_get_queue(bio->bi_bdev),
				     bio->bi_iter.bi_sector);
}
#endif /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset,
					       unsigned int chunk_sectors)
{
	if (!chunk_sectors) {
		if (q->limits.chunk_sectors)
			chunk_sectors = q->limits.chunk_sectors;
		else
			return q->limits.max_sectors;
	}

	if (likely(is_power_of_2(chunk_sectors)))
		chunk_sectors -= offset & (chunk_sectors - 1);
	else
		chunk_sectors -= sector_div(offset, chunk_sectors);

	return min(q->limits.max_sectors, chunk_sectors);
}
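
/*
 * Worked example for blk_max_size_offset() (illustrative numbers): with
 * max_sectors = 1024, chunk_sectors = 256 and an offset 200 sectors into
 * the current chunk, 256 - (200 & 255) = 56 sectors remain before the
 * chunk boundary, so the request size is capped at min(1024, 56) = 56.
 */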

/*
 * Access functions for manipulating queue properties
 */
extern void blk_cleanup_queue(struct request_queue *);
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
		unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);
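
/*
 * Illustrative sketch (error handling elided): describing a dual-actuator
 * disk whose capacity is split evenly between two independent access
 * ranges, as a low level driver might do before registering the disk.
 *
 *	struct blk_independent_access_ranges *iars;
 *	sector_t half = get_capacity(disk) / 2;
 *
 *	iars = disk_alloc_independent_access_ranges(disk, 2);
 *	if (!iars)
 *		return -ENOMEM;
 *	iars->ia_range[0].sector = 0;
 *	iars->ia_range[0].nr_sectors = half;
 *	iars->ia_range[1].sector = half;
 *	iars->ia_range[1].nr_sectors = get_capacity(disk) - half;
 *	disk_set_independent_access_ranges(disk, iars);
 */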

/*
 * Elevator features for blk_queue_required_elevator_features:
 */
/* Supports zoned block devices sequential write constraint */
#define ELEVATOR_F_ZBD_SEQ_WRITE	(1U << 0)
/* Supports scheduling on multiple hardware queues */
#define ELEVATOR_F_MQ_AWARE		(1U << 1)

extern void blk_queue_required_elevator_features(struct request_queue *q,
						 unsigned int features);
extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
					      struct device *dev);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);

#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into single larger request. As the requests are moved from a per-task list to
 * the device's request_queue in a batch, this results in improved scalability
 * as the lock contention for request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {
	struct request *mq_list; /* blk-mq requests */

	/* if ios_left is > 1, we can batch tag/rq allocations */
	struct request *cached_rq;
	unsigned short nr_ios;

	unsigned short rq_count;

	bool multiple_queues;
	bool has_elevator;
	bool nowait;

	struct list_head cb_list; /* md requires an unplug callback */
};

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
	if (plug)
		__blk_flush_plug(plug, async);
}
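
/*
 * Illustrative sketch: batching several already prepared bios under one
 * plug so that they can be merged and dispatched together when the plug
 * is finished ("bios" and "nr_bios" are assumed to be set up by the
 * caller).
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */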

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
					 unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */

extern void blk_io_schedule(void);

extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page);

#define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */

extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}

static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev->bd_partno;
}

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 2560,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
{

	const struct queue_limits *l = &q->limits;

	return min(l->max_zone_append_sectors, l->max_sectors);
}

static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
	return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
	return queue_zone_write_granularity(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(const struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(const struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}

static inline int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}

static inline unsigned int bdev_write_same(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_same_sectors;

	return 0;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_zeroes_sectors;

	return 0;
}

static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_zoned_model(q);

	return BLK_ZONED_NONE;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_is_zoned(q);

	return false;
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_zone_sectors(q);
	return 0;
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return queue_max_open_zones(q);
	return 0;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return queue_max_active_zones(q);
	return 0;
}

static inline int queue_dma_alignment(const struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
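
/* Worked example: blksize_bits(512) == 9, blksize_bits(4096) == 12. */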

static inline unsigned int block_size(struct block_device *bdev)
{
	return 1 << bdev->bd_inode->i_blkbits;
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
				       struct request_queue *q)
{
	return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

enum blk_unique_id {
	/* these match the Designator Types specified in SPC */
	BLK_UID_T10	= 1,
	BLK_UID_EUI64	= 2,
	BLK_UID_NAA	= 3,
};

#define NFL4_UFLG_MASK			0x0000003F

struct block_device_operations {
	void (*submit_bio)(struct bio *bio);
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	int (*set_read_only)(struct block_device *bdev, bool ro);
	void (*free_disk)(struct gendisk *disk);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	int (*report_zones)(struct gendisk *, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
	char *(*devnode)(struct gendisk *disk, umode_t *mode);
	/* returns the length of the identifier or a negative errno: */
	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
			enum blk_unique_id id_type);
	struct module *owner;
	const struct pr_ops *pr_ops;

	/*
	 * Special callback for probing GPT entry at a given sector.
	 * Needed by Android devices, used by GPT scanner and MMC blk
	 * driver.
	 */
	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};

#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
				      unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif

extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
						struct writeback_control *);

static inline void blk_wake_io_task(struct task_struct *waiter)
{
	/*
	 * If we're polling, the task itself is doing the completions. For
	 * that case, we don't need to signal a wakeup, it's enough to just
	 * mark us as RUNNING.
	 */
	if (waiter == current)
		__set_current_state(TASK_RUNNING);
	else
		wake_up_process(waiter);
}

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
		unsigned int op);
void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		unsigned long start_time);

void bio_start_io_acct_time(struct bio *bio, unsigned long start_time);
unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev);

/**
 * bio_end_io_acct - end I/O accounting for bio based drivers
 * @bio:	bio to end account for
 * @start_time:	start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}
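
/*
 * Illustrative sketch for a bio based driver: account a bio from
 * submission to completion using the helpers above.
 *
 *	unsigned long start_time = bio_start_io_acct(bio);
 *
 *	... perform the actual I/O, then from the completion path ...
 *
 *	bio_end_io_acct(bio, start_time);
 */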

int bdev_read_only(struct block_device *bdev);
int set_blocksize(struct block_device *bdev, int size);

const char *bdevname(struct block_device *bdev, char *buffer);
int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
#define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX	512
#else
#define BLKDEV_MAJOR_MAX	0
#endif

struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
		void *holder);
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
int bd_prepare_to_claim(struct block_device *bdev, void *holder);
void bd_abort_claiming(struct block_device *bdev, void *holder);
void blkdev_put(struct block_device *bdev, fmode_t mode);
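
/*
 * Illustrative sketch (the path and "my_holder" are placeholders): open a
 * block device by path for exclusive read/write access and release it
 * again with the same mode flags.
 *
 *	struct block_device *bdev;
 *
 *	bdev = blkdev_get_by_path("/dev/vdb", FMODE_READ | FMODE_WRITE |
 *				  FMODE_EXCL, my_holder);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 */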

/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
struct block_device *I_BDEV(struct inode *inode);
int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
		loff_t lend);

#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void printk_all_partitions(void);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
	return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void printk_all_partitions(void)
{
}
#endif /* CONFIG_BLOCK */

int fsync_bdev(struct block_device *bdev);

int freeze_bdev(struct block_device *bdev);
int thaw_bdev(struct block_device *bdev);

struct io_comp_batch {
	struct request *req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
};

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }

#endif /* _LINUX_BLKDEV_H */