#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>

#include <asm/scatterlist.h>

#ifdef CONFIG_LBD
# include <asm/div64.h>
# define sector_div(a, b) do_div(a, b)
#else
# define sector_div(n, b)( \
{ \
	int _res; \
	_res = (n) % (b); \
	(n) /= (b); \
	_res; \
} \
)
#endif
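
/*
 * Usage sketch (illustrative only, all names below are made up):
 * sector_div(n, b) divides the sector_t n by b in place and evaluates
 * to the remainder, avoiding an open-coded 64-bit division:
 *
 *	sector_t nr = sect;
 *	unsigned int rem = sector_div(nr, chunk_sectors);
 *
 * afterwards nr == sect / chunk_sectors and rem == sect % chunk_sectors.
 */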

#ifdef CONFIG_BLOCK

struct scsi_ioctl_command;

struct request_queue;
typedef struct request_queue request_queue_t;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * This is the per-process anticipatory I/O scheduler state.
 */
struct as_io_context {
	spinlock_t lock;

	void (*dtor)(struct as_io_context *aic); /* destructor */
	void (*exit)(struct as_io_context *aic); /* called on task exit */

	unsigned long state;
	atomic_t nr_queued; /* queued reads & sync writes */
	atomic_t nr_dispatched; /* number of requests gone to the drivers */

	/* IO History tracking */
	/* Thinktime */
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
	/* Layout pattern */
	unsigned int seek_samples;
	sector_t last_request_pos;
	u64 seek_total;
	sector_t seek_mean;
};

struct cfq_queue;
struct cfq_io_context {
	struct rb_node rb_node;
	void *key;

	struct cfq_queue *cfqq[2];

	struct io_context *ioc;

	unsigned long last_end_request;
	sector_t last_request_pos;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	unsigned int seek_samples;
	u64 seek_total;
	sector_t seek_mean;

	struct list_head queue_list;

	void (*dtor)(struct io_context *); /* destructor */
	void (*exit)(struct io_context *); /* called on task exit */
};

/*
 * This is the per-process I/O subsystem state.  It is refcounted and
 * kmalloc'ed. Currently all fields are modified in process io context
 * (apart from the atomic refcount), so require no locking.
 */
struct io_context {
	atomic_t refcount;
	struct task_struct *task;

	unsigned int ioprio_changed;

	/*
	 * For request batching
	 */
	unsigned long last_waited; /* Time last woken after wait for request */
	int nr_batch_requests;     /* Number of requests left in the batch */

	struct as_io_context *aic;
	struct rb_root cic_root;
	void *ioc_data;
};

void put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
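
/*
 * Lifetime sketch (illustrative, not taken from a real caller; 'node'
 * is a placeholder): get_io_context() returns the current task's
 * io_context with its refcount elevated, so every successful get must
 * be balanced by a put:
 *
 *	struct io_context *ioc = get_io_context(GFP_NOIO, node);
 *	if (ioc) {
 *		... use ioc ...
 *		put_io_context(ioc);
 *	}
 */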

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_FLUSH,			/* flush request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_CMD,
	REQ_TYPE_ATA_TASK,
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

/*
 * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
 * SCSI cdb).
 *
 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
 * typically to differentiate REQ_TYPE_SPECIAL requests.
 *
 */
enum {
	/*
	 * just examples for now
	 */
	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
	REQ_LB_OP_FLUSH = 0x41,		/* flush device */
};
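
/*
 * Dispatch sketch (hypothetical driver code, the mydrv_* helpers are
 * placeholders): the opcode of a REQ_TYPE_LINUX_BLOCK request is read
 * back out of rq->cmd[0], e.g. in a driver's request_fn:
 *
 *	if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK) {
 *		switch (rq->cmd[0]) {
 *		case REQ_LB_OP_EJECT:
 *			mydrv_eject(rq);
 *			break;
 *		case REQ_LB_OP_FLUSH:
 *			mydrv_flush(rq);
 *			break;
 *		}
 *	}
 */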

/*
 * request type modified bits. first two bits match BIO_RW* bits, important
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read. set, write */
	__REQ_FAILFAST,		/* no low level driver retries */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_FUA,		/* forced unit access */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_ORDERED_COLOR,	/* is before or after barrier */
	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_RW_META,		/* metadata io request */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_RW		(1 << __REQ_RW)
#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
#define REQ_SORTED	(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
#define REQ_FUA		(1 << __REQ_FUA)
#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
#define REQ_STARTED	(1 << __REQ_STARTED)
#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
#define REQ_QUEUED	(1 << __REQ_QUEUED)
#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
#define REQ_FAILED	(1 << __REQ_FAILED)
#define REQ_QUIET	(1 << __REQ_QUIET)
#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
#define REQ_ALLOCED	(1 << __REQ_ALLOCED)
#define REQ_RW_META	(1 << __REQ_RW_META)

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline
 */
struct request {
	struct list_head queuelist;
	struct list_head donelist;

	request_queue_t *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;

	/* Maintain bio traversal state for part by part I/O submission.
	 * hard_* are block layer internals, no driver should touch them!
	 */

	sector_t sector;		/* next sector to submit */
	sector_t hard_sector;		/* next sector to complete */
	unsigned long nr_sectors;	/* no. of sectors left to submit */
	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
	/* no. of sectors left to submit in the current segment */
	unsigned int current_nr_sectors;

	/* no. of sectors left to complete in the current segment */
	unsigned int hard_cur_sectors;

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * two pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	void *elevator_private;
	void *elevator_private2;

	struct gendisk *rq_disk;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	/* Number of scatter-gather addr+len pairs after
	 * physical and DMA remapping hardware coalescing is performed.
	 * This is the number of scatter-gather entries the driver
	 * will actually have to deal with after DMA mapping is done.
	 */
	unsigned short nr_hw_segments;

	unsigned short ioprio;

	void *special;
	char *buffer;

	int tag;
	int errors;

	int ref_count;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned int cmd_len;
	unsigned char cmd[BLK_MAX_CDB];

	unsigned int data_len;
	unsigned int sense_len;
	void *data;
	void *sense;

	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;
};

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (request_queue_t *q);
typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
typedef int (prep_rq_fn) (request_queue_t *, struct request *);
typedef void (unplug_fn) (request_queue_t *);

struct bio_vec;
typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
typedef void (softirq_done_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	struct list_head busy_list;	/* fifo list of busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	elevator_t		*elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unplug_fn		*unplug_fn;
	merge_bvec_fn		*merge_bvec_fn;
	issue_flush_fn		*issue_flush_fn;
	prepare_flush_fn	*prepare_flush_fn;
	softirq_done_fn		*softirq_done_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list	unplug_timer;
	int			unplug_thresh;	/* After this many requests */
	unsigned long		unplug_delay;	/* After this many jiffies */
	struct work_struct	unplug_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	unsigned long		bounce_pfn;
	gfp_t			bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		max_sectors;
	unsigned int		max_hw_sectors;
	unsigned short		max_phys_segments;
	unsigned short		max_hw_segments;
	unsigned short		hardsect_size;
	unsigned int		max_segment_size;

	unsigned long		seg_boundary_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;

	unsigned int		nr_sorted;
	unsigned int		in_flight;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * reserved for flush operations
	 */
	unsigned int		ordered, next_ordered, ordseq;
	int			orderr, ordcolor;
	struct request		pre_flush_rq, bar_rq, post_flush_rq;
	struct request		*orig_bar_rq;
	unsigned int		bi_size;

	struct mutex		sysfs_lock;
};

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define	QUEUE_FLAG_READFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */

enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_NONE	= 0x00,
	QUEUE_ORDERED_DRAIN	= 0x01,
	QUEUE_ORDERED_TAG	= 0x02,

	QUEUE_ORDERED_PREFLUSH	= 0x10,
	QUEUE_ORDERED_POSTFLUSH	= 0x20,
	QUEUE_ORDERED_FUA	= 0x40,

	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};
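
/*
 * Setup sketch (illustrative; mydrv_prepare_flush is a hypothetical
 * prepare_flush_fn): a driver whose device has a volatile write cache
 * but supports a cache-flush command might register
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydrv_prepare_flush);
 *
 * the callback then turns the queue's pre/post flush requests into
 * device cache-flush commands when a barrier is processed.
 */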

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)

#define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))

#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq)	\
	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

/*
 * We regard a request as sync if it's a READ or a SYNC write.
 */
#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)

static inline int blk_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_clear_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}


/*
 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
 * it already be started by driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */
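
/*
 * Sketch of a prep_rq_fn (hypothetical driver, the mydrv_* names are
 * made up):
 *
 *	static int mydrv_prep_rq(request_queue_t *q, struct request *rq)
 *	{
 *		if (!mydrv_have_resources(q))
 *			return BLKPREP_DEFER;	(left on queue, retried later)
 *		if (mydrv_build_command(rq) < 0)
 *			return BLKPREP_KILL;	(completed with an error)
 *		rq->cmd_flags |= REQ_DONTPREP;	(don't prep again on requeue)
 *		return BLKPREP_OK;
 *	}
 *
 * registered at init time with blk_queue_prep_rq(q, mydrv_prep_rq).
 */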

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY		((u64)blk_max_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)

#ifdef CONFIG_MMU
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
{
}
#endif /* CONFIG_MMU */

#define rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
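
/*
 * Iteration sketch (illustrative): walking every bio still attached to
 * a request, e.g. to count them:
 *
 *	struct bio *bio;
 *	int nr_bios = 0;
 *
 *	rq_for_each_bio(bio, rq)
 *		nr_bios++;
 */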

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(request_queue_t *, struct request *);
extern void blk_end_sync_rq(struct request *rq, int error);
extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
extern void blk_requeue_request(request_queue_t *, struct request *);
extern void blk_plug_device(request_queue_t *);
extern int blk_remove_plug(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
extern int sg_scsi_ioctl(struct file *, struct request_queue *,
		struct gendisk *, struct scsi_ioctl_command __user *);

/*
 * Temporary export, until SCSI gets fixed up.
 */
extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
{
	clear_bdi_congested(&q->backing_dev_info, rw);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(request_queue_t *q, int rw)
{
	set_bdi_congested(&q->backing_dev_info, rw);
}

extern void blk_start_queue(request_queue_t *q);
extern void blk_stop_queue(request_queue_t *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(request_queue_t *q);
extern void blk_run_queue(request_queue_t *);
extern void blk_start_queueing(request_queue_t *);
extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
			       struct sg_iovec *, int, unsigned int);
extern int blk_execute_rq(request_queue_t *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern int blk_fill_sghdr_rq(request_queue_t *, struct request *,
		      struct sg_io_hdr *, int);
extern int blk_unmap_sghdr_rq(struct request *, struct sg_io_hdr *);
extern int blk_complete_sghdr_rq(struct request *, struct sg_io_hdr *,
			  struct bio *);

static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

/*
 * end_request() and friends. Must be called with the request queue spinlock
 * acquired. All functions called within end_request() _must_be_ atomic.
 *
 * Several drivers define their own end_request and call
 * end_that_request_first() and end_that_request_last()
 * for parts of the original function. This prevents
 * code duplication in drivers.
 */
extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int);
extern void end_that_request_last(struct request *, int);
extern void end_request(struct request *req, int uptodate);
extern void blk_complete_request(struct request *);

/*
 * end_that_request_first/chunk() takes an uptodate argument. we account
 * any value <= 0 as an io error. 0 means -EIO for compatibility reasons,
 * any other < 0 value is the direct error type. An uptodate value of
 * 1 indicates successful io completion
 */
#define end_io_error(uptodate)	(unlikely((uptodate) <= 0))
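
/*
 * Completion sketch (illustrative, mirroring what end_request() does
 * internally): once the hardware has finished nr_sectors sectors of rq,
 * with the queue lock held:
 *
 *	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
 *		blkdev_dequeue_request(rq);
 *		end_that_request_last(rq, uptodate);
 *	}
 *
 * end_that_request_first() returns 0 once no sectors remain, at which
 * point the request is dequeued and finally completed.
 */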

static inline void blkdev_dequeue_request(struct request *req)
{
	elv_dequeue_request(req->q, req);
}

/*
 * Access functions for manipulating queue properties
 */
extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void blk_queue_bounce_limit(request_queue_t *, u64);
extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b);
extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(request_queue_t *, int);
extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
extern int blk_do_ordered(request_queue_t *, struct request **);
extern unsigned blk_ordered_cur_seq(request_queue_t *);
extern unsigned blk_ordered_req_seq(struct request *);
extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);

extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(request_queue_t *);
extern void __generic_unplug_device(request_queue_t *);
extern long nr_blockdev_pages(void);

int blk_get_queue(request_queue_t *);
request_queue_t *blk_alloc_queue(gfp_t);
request_queue_t *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(request_queue_t *);

/*
 * tag stuff
 */
#define blk_queue_tag_depth(q)		((q)->queue_tags->busy)
#define blk_queue_tag_queue(q)		((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(request_queue_t *, struct request *);
extern struct request *blk_queue_find_tag(request_queue_t *, int);
extern void blk_queue_end_tag(request_queue_t *, struct request *);
extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(request_queue_t *);
extern int blk_queue_resize_tags(request_queue_t *, int);
extern void blk_queue_invalidate_tags(request_queue_t *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);
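
/*
 * Tagged queueing sketch (hypothetical driver flow): enable tagging
 * once, tag each request before issue, release the tag on completion:
 *
 *	blk_queue_init_tags(q, depth, NULL);	(at init time)
 *
 *	if (blk_queue_start_tag(q, rq))
 *		return;				(no free tag, retry later)
 *	(issue the command to the device under rq->tag)
 *
 *	blk_queue_end_tag(q, rq);		(when it completes)
 */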

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
extern int blkdev_issue_flush(struct block_device *, sector_t *);

#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
#define BLK_DEF_MAX_SECTORS 1024

#define MAX_SEGMENT_SIZE	65536

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline int queue_hardsect_size(request_queue_t *q)
{
	int retval = 512;

	if (q && q->hardsect_size)
		retval = q->hardsect_size;

	return retval;
}

static inline int bdev_hardsect_size(struct block_device *bdev)
{
	return queue_hardsect_size(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(request_queue_t *q)
{
	int retval = 511;

	if (q && q->dma_alignment)
		retval = q->dma_alignment;

	return retval;
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
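
/* e.g. blksize_bits(512) == 9, blksize_bits(4096) == 12 */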

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
void kblockd_flush_work(struct work_struct *work);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")
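
/*
 * e.g. a driver owning block major 240 (a made-up number) could declare
 *	MODULE_ALIAS_BLOCKDEV_MAJOR(240)
 * so module autoloading can resolve "block-major-240-*" requests to it.
 */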


#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

static inline void exit_io_context(void)
{
}

#endif /* CONFIG_BLOCK */

#endif