#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>

#include <asm/scatterlist.h>

#ifdef CONFIG_LBD
# include <asm/div64.h>
# define sector_div(a, b) do_div(a, b)
#else
# define sector_div(n, b)( \
{ \
	int _res; \
	_res = (n) % (b); \
	(n) /= (b); \
	_res; \
} \
)
#endif
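
/*
 * Usage sketch (editorial illustration; "chunk_sectors" is a made-up
 * name): sector_div() divides its first argument in place and returns
 * the remainder, which works for 64-bit sector_t on 32-bit machines:
 *
 *	sector_t chunk = sector;
 *	unsigned int offset = sector_div(chunk, chunk_sectors);
 *
 * chunk now holds sector / chunk_sectors, offset holds the remainder.
 */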

#ifdef CONFIG_BLOCK

struct scsi_ioctl_command;

struct request_queue;
typedef struct request_queue request_queue_t;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
struct blk_trace;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * This is the per-process anticipatory I/O scheduler state.
 */
struct as_io_context {
	spinlock_t lock;

	void (*dtor)(struct as_io_context *aic); /* destructor */
	void (*exit)(struct as_io_context *aic); /* called on task exit */

	unsigned long state;
	atomic_t nr_queued; /* queued reads & sync writes */
	atomic_t nr_dispatched; /* number of requests gone to the drivers */

	/* IO History tracking */
	/* Thinktime */
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
	/* Layout pattern */
	unsigned int seek_samples;
	sector_t last_request_pos;
	u64 seek_total;
	sector_t seek_mean;
};

struct cfq_queue;
struct cfq_io_context {
	struct rb_node rb_node;
	void *key;

	struct cfq_queue *cfqq[2];

	struct io_context *ioc;

	unsigned long last_end_request;
	sector_t last_request_pos;
	unsigned long last_queue;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	unsigned int seek_samples;
	u64 seek_total;
	sector_t seek_mean;

	struct list_head queue_list;

	void (*dtor)(struct io_context *); /* destructor */
	void (*exit)(struct io_context *); /* called on task exit */
};

/*
 * This is the per-process I/O subsystem state.  It is refcounted and
 * kmalloc'ed. Currently all fields are modified in process io context
 * (apart from the atomic refcount), so they require no locking.
 */
struct io_context {
	atomic_t refcount;
	struct task_struct *task;

	unsigned int ioprio_changed;

	/*
	 * For request batching
	 */
	unsigned long last_waited; /* Time last woken after wait for request */
	int nr_batch_requests;     /* Number of requests left in the batch */

	struct as_io_context *aic;
	struct rb_root cic_root;
};

void put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
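
/*
 * Typical use of the refcounting API above (editorial sketch):
 *
 *	struct io_context *ioc = get_io_context(GFP_KERNEL, -1);
 *	if (ioc) {
 *		... inspect ioc->aic, ioc->ioprio_changed, etc. ...
 *		put_io_context(ioc);
 *	}
 *
 * get_io_context() returns the current task's io_context with an extra
 * reference held (allocating it on first use); put_io_context() drops
 * that reference.  The -1 node argument means no NUMA preference.
 */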

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_FLUSH,			/* flush request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_CMD,
	REQ_TYPE_ATA_TASK,
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

/*
 * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
 * SCSI cdb).
 *
 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
 * typically to differentiate REQ_TYPE_SPECIAL requests.
 *
 */
enum {
	/*
	 * just examples for now
	 */
	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
	REQ_LB_OP_FLUSH = 0x41,		/* flush device */
};

/*
 * request type modified bits. first three bits match BIO_RW* bits, important
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read. set, write */
	__REQ_FAILFAST,		/* no low level driver retries */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_FUA,		/* forced unit access */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_ORDERED_COLOR,	/* is before or after barrier */
	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_RW_META,		/* metadata io request */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_RW		(1 << __REQ_RW)
#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
#define REQ_SORTED	(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
#define REQ_FUA		(1 << __REQ_FUA)
#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
#define REQ_STARTED	(1 << __REQ_STARTED)
#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
#define REQ_QUEUED	(1 << __REQ_QUEUED)
#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
#define REQ_FAILED	(1 << __REQ_FAILED)
#define REQ_QUIET	(1 << __REQ_QUIET)
#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
#define REQ_ALLOCED	(1 << __REQ_ALLOCED)
#define REQ_RW_META	(1 << __REQ_RW_META)

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline
 */
struct request {
	struct list_head queuelist;
	struct list_head donelist;

	request_queue_t *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;

	/* Maintain bio traversal state for part by part I/O submission.
	 * hard_* are block layer internals, no driver should touch them!
	 */

	sector_t sector;		/* next sector to submit */
	sector_t hard_sector;		/* next sector to complete */
	unsigned long nr_sectors;	/* no. of sectors left to submit */
	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
	/* no. of sectors left to submit in the current segment */
	unsigned int current_nr_sectors;

	/* no. of sectors left to complete in the current segment */
	unsigned int hard_cur_sectors;

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * two pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	void *elevator_private;
	void *elevator_private2;

	struct gendisk *rq_disk;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	/* Number of scatter-gather addr+len pairs after
	 * physical and DMA remapping hardware coalescing is performed.
	 * This is the number of scatter-gather entries the driver
	 * will actually have to deal with after DMA mapping is done.
	 */
	unsigned short nr_hw_segments;

	unsigned short ioprio;

	void *special;
	char *buffer;

	int tag;
	int errors;

	int ref_count;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned int cmd_len;
	unsigned char cmd[BLK_MAX_CDB];

	unsigned int data_len;
	unsigned int sense_len;
	void *data;
	void *sense;

	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;
};

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (request_queue_t *q);
typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
typedef int (prep_rq_fn) (request_queue_t *, struct request *);
typedef void (unplug_fn) (request_queue_t *);

struct bio_vec;
typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
typedef void (softirq_done_fn)(struct request *);
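
/*
 * The request_fn is the driver's strategy routine; the block layer
 * invokes it with the queue lock held whenever work is queued.  A
 * minimal loop looks roughly like this (editorial sketch; "hypo_"
 * names are invented):
 *
 *	static void hypo_request_fn(request_queue_t *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			if (!blk_fs_request(rq)) {
 *				end_request(rq, 0);
 *				continue;
 *			}
 *			blkdev_dequeue_request(rq);
 *			... start the transfer described by rq->sector,
 *			    rq->current_nr_sectors and rq->buffer ...
 *		}
 *	}
 *
 * elv_next_request() comes from <linux/elevator.h> above; end_request(),
 * blkdev_dequeue_request() and blk_fs_request() are defined below.
 */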

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	struct list_head busy_list;	/* fifo list of busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	elevator_t		*elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unplug_fn		*unplug_fn;
	merge_bvec_fn		*merge_bvec_fn;
	issue_flush_fn		*issue_flush_fn;
	prepare_flush_fn	*prepare_flush_fn;
	softirq_done_fn		*softirq_done_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list	unplug_timer;
	int			unplug_thresh;	/* After this many requests */
	unsigned long		unplug_delay;	/* After this many jiffies */
	struct work_struct	unplug_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	unsigned long		bounce_pfn;
	gfp_t			bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		max_sectors;
	unsigned int		max_hw_sectors;
	unsigned short		max_phys_segments;
	unsigned short		max_hw_segments;
	unsigned short		hardsect_size;
	unsigned int		max_segment_size;

	unsigned long		seg_boundary_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;

	unsigned int		nr_sorted;
	unsigned int		in_flight;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * reserved for flush operations
	 */
	unsigned int		ordered, next_ordered, ordseq;
	int			orderr, ordcolor;
	struct request		pre_flush_rq, bar_rq, post_flush_rq;
	struct request		*orig_bar_rq;
	unsigned int		bi_size;

	struct mutex		sysfs_lock;
};

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define	QUEUE_FLAG_READFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */

enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_NONE	= 0x00,
	QUEUE_ORDERED_DRAIN	= 0x01,
	QUEUE_ORDERED_TAG	= 0x02,

	QUEUE_ORDERED_PREFLUSH	= 0x10,
	QUEUE_ORDERED_POSTFLUSH	= 0x20,
	QUEUE_ORDERED_FUA	= 0x40,

	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};
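
/*
 * Editorial sketch: a driver advertises which of the above methods its
 * hardware supports via blk_queue_ordered() (declared below).  For a
 * disk with a volatile write cache and a flush command, the usual
 * choice is ("hypo_prepare_flush" is an invented name):
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, hypo_prepare_flush);
 *
 * where hypo_prepare_flush(q, rq) initializes rq as the device's cache
 * flush command whenever a pre- or post-flush is required.
 */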

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)

#define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))

#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq)	\
	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

/*
 * We regard a request as sync if it's a READ or a SYNC write.
 */
#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)

static inline int blk_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_clear_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}


/*
 * a mergeable request must not have the _NOMERGE or _BARRIER bit set,
 * nor may it already have been started by the driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY		((u64)blk_max_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

#ifdef CONFIG_MMU
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
{
}
#endif /* CONFIG_MMU */

#define rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
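
/*
 * Editorial sketch: walking all bios still attached to a request with
 * rq_for_each_bio() ("hypo_map_bio" is an invented helper):
 *
 *	struct bio *bio;
 *
 *	rq_for_each_bio(bio, rq)
 *		hypo_map_bio(q, bio);
 */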

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(request_queue_t *, struct request *);
extern void blk_end_sync_rq(struct request *rq, int error);
extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
extern void blk_requeue_request(request_queue_t *, struct request *);
extern void blk_plug_device(request_queue_t *);
extern int blk_remove_plug(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
extern int sg_scsi_ioctl(struct file *, struct request_queue *,
		struct gendisk *, struct scsi_ioctl_command __user *);

/*
 * Temporary export, until SCSI gets fixed up.
 */
extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
{
	clear_bdi_congested(&q->backing_dev_info, rw);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(request_queue_t *q, int rw)
{
	set_bdi_congested(&q->backing_dev_info, rw);
}

extern void blk_start_queue(request_queue_t *q);
extern void blk_stop_queue(request_queue_t *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(request_queue_t *q);
extern void blk_run_queue(request_queue_t *);
extern void blk_start_queueing(request_queue_t *);
extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
			       struct sg_iovec *, int, unsigned int);
extern int blk_execute_rq(request_queue_t *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
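
/*
 * Editorial sketch: issuing a packet command synchronously.  The caller
 * builds a REQ_TYPE_BLOCK_PC request, copies the command descriptor
 * block into rq->cmd[] and lets blk_execute_rq() wait for completion
 * ("cdb", "cdb_len" and "disk" are placeholders):
 *
 *	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	memcpy(rq->cmd, cdb, cdb_len);
 *	rq->cmd_len = cdb_len;
 *	rq->timeout = 60 * HZ;
 *	err = blk_execute_rq(q, disk, rq, 0);
 *	blk_put_request(rq);
 *
 * On failure rq->errors carries the low-level error state.
 */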

static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

/*
 * end_request() and friends. Must be called with the request queue spinlock
 * acquired. All functions called within end_request() _must_be_ atomic.
 *
 * Several drivers define their own end_request and call
 * end_that_request_first() and end_that_request_last()
 * for parts of the original function. This prevents
 * code duplication in drivers.
 */
extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int);
extern void end_that_request_last(struct request *, int);
extern void end_request(struct request *req, int uptodate);
extern void blk_complete_request(struct request *);

/*
 * end_that_request_first/chunk() takes an uptodate argument. We account
 * any value <= 0 as an io error: 0 means -EIO for compatibility reasons,
 * any other value < 0 is the direct error type. An uptodate value of
 * 1 indicates successful io completion.
 */
#define end_io_error(uptodate)	(unlikely((uptodate) <= 0))
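
/*
 * Editorial sketch of the canonical completion sequence (essentially
 * what end_request() does): with the queue lock held, finish the
 * current chunk and, once no sectors remain, dequeue and retire the
 * request:
 *
 *	if (!end_that_request_first(rq, uptodate, rq->hard_cur_sectors)) {
 *		blkdev_dequeue_request(rq);
 *		end_that_request_last(rq, uptodate);
 *	}
 */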

static inline void blkdev_dequeue_request(struct request *req)
{
	elv_dequeue_request(req->q, req);
}

/*
 * Access functions for manipulating queue properties
 */
extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void blk_queue_bounce_limit(request_queue_t *, u64);
extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b);
extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(request_queue_t *, int);
extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
extern int blk_do_ordered(request_queue_t *, struct request **);
extern unsigned blk_ordered_cur_seq(request_queue_t *);
extern unsigned blk_ordered_req_seq(struct request *);
extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
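
/*
 * Editorial sketch of typical initialization-time use of the queue
 * setup functions above (all "hypo_" names invented, limits arbitrary):
 *
 *	q = blk_init_queue(hypo_request_fn, &hypo_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_max_sectors(q, 128);
 *	blk_queue_max_phys_segments(q, 32);
 *	blk_queue_max_hw_segments(q, 32);
 *	blk_queue_hardsect_size(q, 512);
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 */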

extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(request_queue_t *);
extern void __generic_unplug_device(request_queue_t *);
extern long nr_blockdev_pages(void);

int blk_get_queue(request_queue_t *);
request_queue_t *blk_alloc_queue(gfp_t);
request_queue_t *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(request_queue_t *);

/*
 * tag stuff
 */
#define blk_queue_tag_depth(q)		((q)->queue_tags->busy)
#define blk_queue_tag_queue(q)		((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(request_queue_t *, struct request *);
extern struct request *blk_queue_find_tag(request_queue_t *, int);
extern void blk_queue_end_tag(request_queue_t *, struct request *);
extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(request_queue_t *);
extern int blk_queue_resize_tags(request_queue_t *, int);
extern void blk_queue_invalidate_tags(request_queue_t *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);
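
/*
 * Editorial sketch of tagged queueing.  After blk_queue_init_tags()
 * succeeds, the driver tags each request before handing it to hardware
 * and releases the tag on completion (queue lock held in both paths;
 * a depth of 64 is an arbitrary example):
 *
 *	if (blk_queue_init_tags(q, 64, NULL))
 *		... allocation failed ...
 *
 *	if (blk_queue_start_tag(q, rq))
 *		... no free tag, retry later ...
 *	... rq->tag now identifies the command to the hardware ...
 *
 *	blk_queue_end_tag(q, rq);
 */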

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
extern int blkdev_issue_flush(struct block_device *, sector_t *);

#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
#define BLK_DEF_MAX_SECTORS 1024

#define MAX_SEGMENT_SIZE	65536

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline int queue_hardsect_size(request_queue_t *q)
{
	int retval = 512;

	if (q && q->hardsect_size)
		retval = q->hardsect_size;

	return retval;
}

static inline int bdev_hardsect_size(struct block_device *bdev)
{
	return queue_hardsect_size(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(request_queue_t *q)
{
	int retval = 511;

	if (q && q->dma_alignment)
		retval = q->dma_alignment;

	return retval;
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
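
/*
 * E.g. blksize_bits(512) == 9, blksize_bits(1024) == 10 and
 * blksize_bits(4096) == 12.  A size of 256 or less would wrongly
 * return 9, hence the "assumes size > 256" note above.
 */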

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
void kblockd_flush(void);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")


#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long blk_congestion_wait(int rw, long timeout)
{
	return io_schedule_timeout(timeout);
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}

static inline void exit_io_context(void)
{
}

#endif /* CONFIG_BLOCK */

#endif