#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

/*
 * For requests of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
 * SCSI cdb).
 *
 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
 * typically to differentiate REQ_TYPE_SPECIAL requests.
 */
enum {
	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
	REQ_LB_OP_FLUSH = 0x41,		/* flush request */
	REQ_LB_OP_DISCARD = 0x42,	/* discard sectors */
};
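
/*
 * Illustrative sketch, not part of this header: a driver using the
 * REQ_LB opcode space might dispatch on rq->cmd[0] from its prep_rq_fn.
 * mydrv_prep_flush() is a hypothetical helper.
 *
 *	static int mydrv_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK &&
 *		    rq->cmd[0] == REQ_LB_OP_FLUSH)
 *			return mydrv_prep_flush(rq) ? BLKPREP_KILL : BLKPREP_OK;
 *		return BLKPREP_OK;
 *	}
 */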

/*
 * request type modifier bits. first two bits match BIO_RW* bits, important
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read. set, write */
	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_DISCARD,		/* request to discard sectors */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_FUA,		/* forced unit access */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_ORDERED_COLOR,	/* is before or after barrier */
	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_RW_META,		/* metadata io request */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
	__REQ_IO_STAT,		/* account I/O stat */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_RW		(1 << __REQ_RW)
#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
#define REQ_DISCARD	(1 << __REQ_DISCARD)
#define REQ_SORTED	(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
#define REQ_FUA		(1 << __REQ_FUA)
#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
#define REQ_STARTED	(1 << __REQ_STARTED)
#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
#define REQ_QUEUED	(1 << __REQ_QUEUED)
#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
#define REQ_FAILED	(1 << __REQ_FAILED)
#define REQ_QUIET	(1 << __REQ_QUIET)
#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
#define REQ_ALLOCED	(1 << __REQ_ALLOCED)
#define REQ_RW_META	(1 << __REQ_RW_META)
#define REQ_COPY_USER	(1 << __REQ_COPY_USER)
#define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)
#define REQ_NOIDLE	(1 << __REQ_NOIDLE)
#define REQ_IO_STAT	(1 << __REQ_IO_STAT)

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;
	int cpu;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	/* the following two fields are internal, NEVER access directly */
	sector_t __sector;		/* sector cursor */
	unsigned int __data_len;	/* total data len */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * two pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	void *elevator_private;
	void *elevator_private2;

	struct gendisk *rq_disk;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	int ref_count;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned short cmd_len;
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);
typedef int (prepare_discard_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
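
/*
 * Illustrative sketch, not part of this header: a hypothetical timeout
 * handler installed with blk_queue_rq_timed_out().  mydrv_hw_busy() is
 * made up; BLK_EH_RESET_TIMER rearms the timer, BLK_EH_NOT_HANDLED hands
 * the request over to the block layer's generic timeout handling.
 *
 *	static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
 *	{
 *		if (mydrv_hw_busy(rq->q->queuedata))
 *			return BLK_EH_RESET_TIMER;
 *		return BLK_EH_NOT_HANDLED;
 *	}
 */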

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct blk_cmd_filter {
	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
	struct kobject kobj;
};

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unplug_fn		*unplug_fn;
	prepare_discard_fn	*prepare_discard_fn;
	merge_bvec_fn		*merge_bvec_fn;
	prepare_flush_fn	*prepare_flush_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list	unplug_timer;
	int			unplug_thresh;	/* After this many requests */
	unsigned long		unplug_delay;	/* After this many jiffies */
	struct work_struct	unplug_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	unsigned long		bounce_pfn;
	gfp_t			bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		max_sectors;
	unsigned int		max_hw_sectors;
	unsigned short		max_phys_segments;
	unsigned short		max_hw_segments;
	unsigned short		hardsect_size;
	unsigned int		max_segment_size;

	unsigned long		seg_boundary_mask;
	void			*dma_drain_buffer;
	unsigned int		dma_drain_size;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight;

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct list_head	timeout_list;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * reserved for flush operations
	 */
	unsigned int		ordered, next_ordered, ordseq;
	int			orderr, ordcolor;
	struct request		pre_flush_rq, bar_rq, post_flush_rq;
	struct request		*orig_bar_rq;

	struct mutex		sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif
	struct blk_cmd_filter cmd_filter;
};

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define	QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES    10	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP   11	/* force complete on same CPU */
#define QUEUE_FLAG_FAIL_IO     12	/* fake timeout */
#define QUEUE_FLAG_STACKABLE   13	/* supports request stacking */
#define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT     15	/* do IO stats */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_CLUSTER) |		\
				 (1 << QUEUE_FLAG_STACKABLE))

static inline int queue_is_locked(struct request_queue *q)
{
#ifdef CONFIG_SMP
	spinlock_t *lock = q->queue_lock;
	return lock && spin_is_locked(lock);
#else
	return 1;
#endif
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__clear_bit(flag, &q->queue_flags);
}
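
/*
 * Illustrative sketch, not part of this header: the locked variants above
 * expect q->queue_lock to be held, e.g. from a hypothetical caller:
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 *
 * The *_unlocked variants skip the queue_is_locked() check and are meant
 * for paths where the queue is not yet visible to anyone else.
 */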

enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_BY_DRAIN		= 0x01,
	QUEUE_ORDERED_BY_TAG		= 0x02,
	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
	QUEUE_ORDERED_DO_BAR		= 0x20,
	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
	QUEUE_ORDERED_DO_FUA		= 0x80,

	QUEUE_ORDERED_NONE		= 0x00,

	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
					  QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_FUA,

	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
					  QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA		= QUEUE_ORDERED_TAG |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};
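
/*
 * Illustrative sketch, not part of this header: a hypothetical driver for
 * a write-back caching device that cannot use ordered tags would pick the
 * drain + flush method at init time:
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydrv_prepare_flush);
 *
 * where mydrv_prepare_flush() is the driver's prepare_flush_fn that turns
 * the generated pre/post flush requests into cache flush commands.
 */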

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

#define blk_failfast_dev(rq)	((rq)->cmd_flags & REQ_FAILFAST_DEV)
#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
#define blk_failfast_driver(rq)	((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
#define blk_noretry_request(rq)	(blk_failfast_dev(rq) ||	\
				 blk_failfast_transport(rq) ||	\
				 blk_failfast_driver(rq))
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
#define blk_rq_io_stat(rq)	((rq)->cmd_flags & REQ_IO_STAT)

#define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))

#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq)	\
	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
#define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

/*
 * We regard a request as sync if it is either a read or a sync write
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
#define rq_noidle(rq)		((rq)->cmd_flags & REQ_NOIDLE)

static inline int blk_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}

/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * it already have been started by the driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
	 (blk_discard_rq(rq) || blk_fs_request((rq))))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
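
/*
 * Illustrative sketch, not part of this header: walking every segment of
 * a request, e.g. for a PIO transfer (mydrv_xfer_page() is hypothetical):
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		mydrv_xfer_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
 */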

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

/*
 * Temporary export, until SCSI gets fixed up.
 */
extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
			     struct bio *bio);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
{
	clear_bdi_congested(&q->backing_dev_info, rw);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int rw)
{
	set_bdi_congested(&q->backing_dev_info, rw);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern void blk_unplug(struct request_queue *q);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

/*
 * blk_rq_pos()		: the current sector
 * blk_rq_bytes()	: bytes left in the entire request
 * blk_rq_cur_bytes()	: bytes left in the current segment
 * blk_rq_sectors()	: sectors left in the entire request
 * blk_rq_cur_sectors()	: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}
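
/*
 * Illustrative sketch, not part of this header: drivers derive transfer
 * parameters from these accessors rather than reading the internal
 * __sector/__data_len fields (mydrv_program_dma() is hypothetical):
 *
 *	mydrv_program_dma(dev, blk_rq_pos(rq) << 9, blk_rq_bytes(rq));
 */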

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_bidi_request(struct request *rq, int error,
				 unsigned int nr_bytes,
				 unsigned int bidi_bytes);
extern bool __blk_end_bidi_request(struct request *rq, int error,
				   unsigned int nr_bytes,
				   unsigned int bidi_bytes);

/**
 * blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
static inline bool blk_end_request(struct request *rq, int error,
				   unsigned int nr_bytes)
{
	return blk_end_bidi_request(rq, error, nr_bytes, 0);
}

/**
 * blk_end_request_all - Helper function for drivers to finish the request.
 * @rq: the request to finish
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Completely finish @rq.
 */
static inline void blk_end_request_all(struct request *rq, int error)
{
	bool pending;
	unsigned int bidi_bytes = 0;

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
	BUG_ON(pending);
}

/**
 * blk_end_request_cur - Helper function to finish the current request chunk.
 * @rq: the request to finish the current chunk for
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Complete the current consecutively mapped chunk from @rq.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 */
static inline bool blk_end_request_cur(struct request *rq, int error)
{
	return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}

/**
 * __blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Must be called with queue lock held unlike blk_end_request().
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 **/
static inline bool __blk_end_request(struct request *rq, int error,
				     unsigned int nr_bytes)
{
	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
}

/**
 * __blk_end_request_all - Helper function for drivers to finish the request.
 * @rq: the request to finish
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Completely finish @rq.  Must be called with queue lock held.
 */
static inline void __blk_end_request_all(struct request *rq, int error)
{
	bool pending;
	unsigned int bidi_bytes = 0;

	if (unlikely(blk_bidi_rq(rq)))
		bidi_bytes = blk_rq_bytes(rq->next_rq);

	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
	BUG_ON(pending);
}

/**
 * __blk_end_request_cur - Helper function to finish the current request chunk.
 * @rq: the request to finish the current chunk for
 * @error: %0 for success, < %0 for error
 *
 * Description:
 *     Complete the current consecutively mapped chunk from @rq.  Must
 *     be called with queue lock held.
 *
 * Return:
 *     %false - we are done with this request
 *     %true  - still buffers pending for this request
 */
static inline bool __blk_end_request_cur(struct request *rq, int error)
{
	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
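
/*
 * Illustrative sketch, not part of this header: a hypothetical driver
 * completing one chunk per hardware interrupt.  dev->cur_rq and the
 * mydrv_* names are made up; the queue lock is taken because the
 * __blk_* variants expect it held.
 *
 *	static void mydrv_irq_done(struct mydrv_dev *dev)
 *	{
 *		struct request *rq = dev->cur_rq;
 *
 *		spin_lock(rq->q->queue_lock);
 *		if (!__blk_end_request_cur(rq, 0))
 *			dev->cur_rq = NULL;
 *		spin_unlock(rq->q->queue_lock);
 *	}
 */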

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
extern bool blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * tag stuff
 */
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

extern int blkdev_issue_flush(struct block_device *, sector_t *);
extern int blkdev_issue_discard(struct block_device *,
				sector_t sector, sector_t nr_sects, gfp_t);

static inline int sb_issue_discard(struct super_block *sb,
				   sector_t block, sector_t nr_blocks)
{
	block <<= (sb->s_blocksize_bits - 9);
	nr_blocks <<= (sb->s_blocksize_bits - 9);
	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
}

/*
 * command filter functions
 */
extern int blk_verify_command(struct blk_cmd_filter *filter,
			      unsigned char *cmd, fmode_t has_write_perm);
extern void blk_unregister_filter(struct gendisk *disk);
extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);

#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
#define BLK_DEF_MAX_SECTORS 1024

#define MAX_SEGMENT_SIZE	65536

#define BLK_SEG_BOUNDARY_MASK	0xFFFFFFFFUL

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline int queue_hardsect_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->hardsect_size)
		retval = q->hardsect_size;

	return retval;
}

static inline int bdev_hardsect_size(struct block_device *bdev)
{
	return queue_hardsect_size(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, void *addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !((unsigned long)addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
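
/*
 * Worked example: blksize_bits(512) == 9 and blksize_bits(4096) == 12,
 * i.e. the shift such that (1 << bits) == size for power-of-two sizes.
 */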

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void			*prot_buf;
	void			*data_buf;
	sector_t		sector;
	unsigned int		data_size;
	unsigned short		sector_size;
	const char		*disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn	*generate_fn;
	integrity_vrfy_fn	*verify_fn;
	integrity_set_tag_fn	*set_tag_fn;
	integrity_get_tag_fn	*get_tag_fn;

	unsigned short		flags;
	unsigned short		tuple_size;
	unsigned short		sector_size;
	unsigned short		tag_size;

	const char		*name;

	struct kobject		kobj;
};

extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

#define blk_integrity_rq(rq)			(0)
#define blk_rq_count_integrity_sg(a)		(0)
#define blk_rq_map_integrity_sg(a, b)		(0)
#define bdev_get_integrity(a)			(0)
#define blk_get_integrity(a)			(0)
#define blk_integrity_compare(a, b)		(0)
#define blk_integrity_register(a, b)		(0)
#define blk_integrity_unregister(a)		do { } while (0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	int (*release) (struct gendisk *, fmode_t);
	int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t,
						void **, unsigned long *);
	int (*media_changed) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	struct module *owner;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif