/*
 * blk-settings.c - Functions related to setting various queue properties
 * from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:		queue
 * @pfn:	prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used, for instance,
 * to build a cdb from the request data.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_set_discard - set a prepare_discard function for queue
 * @q:		queue
 * @dfn:	prepare_discard function
 *
 * It's possible for a queue to register a discard callback which is used
 * to transform a discard request into the appropriate type for the
 * hardware. If none is registered, then discard requests are failed
 * with %EOPNOTSUPP.
 *
 */
void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
{
	q->prepare_discard_fn = dfn;
}
EXPORT_SYMBOL(blk_queue_set_discard);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:		queue
 * @mbfn:	merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits struct to its default state.  Can be used by
 *   stacking drivers like DM that stage table swaps and reuse an
 *   existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
	lim->max_hw_segments = MAX_HW_SEGMENTS;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->max_segment_size = MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->no_cluster = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	blk_set_default_limits(&q->limits);

	/*
	 * If the caller didn't supply a lock, fall back to our embedded
	 * per-queue locks
	 */
	if (!q->queue_lock)
		q->queue_lock = &q->__queue_lock;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
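
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * bio-based virtual driver -- here a hypothetical "vblk" device -- would
 * typically pair blk_alloc_queue() with blk_queue_make_request() so that
 * bios bypass the request queue and go straight to its own handler:
 *
 *	static int vblk_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		// map and service the bio directly, then complete it
 *		bio_endio(bio, 0);
 *		return 0;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_make_request(q, vblk_make_request);
 */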

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max_low_pfn;
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
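
/*
 * Usage sketch (editor's illustration): a driver for hardware that can
 * only address the low 4GB by DMA might do
 *
 *	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 *
 * while a driver that simply cannot handle highmem pages keeps the
 * default set up by blk_queue_make_request():
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 */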

/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_sectors:  max sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of
 *    received requests.
 **/
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_sectors);
	}

	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
	else {
		q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
		q->limits.max_hw_sectors = max_sectors;
	}
}
EXPORT_SYMBOL(blk_queue_max_sectors);

/**
 * blk_queue_max_hw_sectors - set max hw sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Sets the hardware limit on the number of sectors in a single
 *    request without touching max_sectors.  The stored value is never
 *    smaller than BLK_DEF_MAX_SECTORS.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
{
	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
	else
		q->limits.max_hw_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    physical data segments in a request.  This would be the largest sized
 *    scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(struct request_queue *q,
				 unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_phys_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_phys_segments);

/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.  This would be the largest number of
 *    address/length pairs the host adapter can actually give at once
 *    to the device.
 **/
void blk_queue_max_hw_segments(struct request_queue *q,
			       unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_hw_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_hw_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
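
/*
 * Usage sketch (editor's illustration): a low level driver, say a
 * hypothetical "foo" HBA limited to 64 scatter/gather entries of 64KB
 * each and 1MB per command, would typically announce its limits during
 * probe:
 *
 *	blk_queue_max_sectors(q, 2048);		// 1MB in 512b sectors
 *	blk_queue_max_hw_segments(q, 64);
 *	blk_queue_max_phys_segments(q, 64);
 *	blk_queue_max_segment_size(q, 65536);
 */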

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
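
/*
 * Usage sketch (editor's illustration): a driver for a drive that exposes
 * 4096-byte logical sectors would report that at probe time:
 *
 *	blk_queue_logical_block_size(q, 4096);
 *
 * which, per the helper above, also raises physical_block_size and
 * io_min to at least 4096.
 */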

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
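
/*
 * Usage sketch (editor's illustration): a hypothetical drive with
 * 4096-byte physical sectors whose logical block 0 sits 3584 bytes past
 * the underlying physical block boundary could report
 *
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_alignment_offset(q, 3584);
 *
 * so that partitioning tools can align partitions to the real physical
 * boundaries.
 */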

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	q->limits.io_opt = opt;
}
EXPORT_SYMBOL(blk_queue_io_opt);
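
/*
 * Usage sketch (editor's illustration): a hypothetical RAID5 driver with
 * a 64KB chunk size and 4 data disks per stripe might report
 *
 *	blk_queue_io_min(q, 64 * 1024);		// one chunk
 *	blk_queue_io_opt(q, 4 * 64 * 1024);	// one full stripe
 *
 * so that upper layers can generate properly aligned, stripe-sized
 * requests.
 */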

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;
		spin_lock_irqsave(t->queue_lock, flags);
		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top)
 * @b:  the underlying queue limits (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges two queue_limit structs.  Returns 0 if alignment didn't
 *    change.  Returns -1 if adding the bottom device caused
 *    misalignment.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t offset)
{
	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_phys_segments = min_not_zero(t->max_phys_segments,
					    b->max_phys_segments);

	t->max_hw_segments = min_not_zero(t->max_hw_segments,
					  b->max_hw_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->no_cluster |= b->no_cluster;

	/* Bottom device offset aligned? */
	if (offset &&
	    (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
		t->misaligned = 1;
		return -1;
	}

	/* If top has no alignment offset, inherit from bottom */
	if (!t->alignment_offset)
		t->alignment_offset =
			b->alignment_offset & (b->physical_block_size - 1);

	/* Top device aligned on logical block boundary? */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		return -1;
	}

	/* Find lcm() of optimal I/O size */
	if (t->io_opt && b->io_opt)
		t->io_opt = (t->io_opt * b->io_opt) / gcd(t->io_opt, b->io_opt);
	else if (b->io_opt)
		t->io_opt = b->io_opt;

	/* Verify that optimal I/O size is a multiple of io_min */
	if (t->io_min && t->io_opt % t->io_min)
		return -1;

	return 0;
}
EXPORT_SYMBOL(blk_stack_limits);
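
/*
 * Usage sketch (editor's illustration): a stacking driver building a
 * table over two component devices could start from the defaults and
 * fold in each bottom device's limits at its data offset (in bytes):
 *
 *	struct queue_limits lim;
 *
 *	blk_set_default_limits(&lim);
 *	if (blk_stack_limits(&lim, &bdev_get_queue(bdev0)->limits, off0) < 0)
 *		printk(KERN_NOTICE "bdev0 is misaligned\n");
 *	if (blk_stack_limits(&lim, &bdev_get_queue(bdev1)->limits, off1) < 0)
 *		printk(KERN_NOTICE "bdev1 is misaligned\n");
 */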

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.  If the bottom device is misaligned relative to the
 *    top, a warning is printed.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;
	struct request_queue *b = bdev_get_queue(bdev);

	offset += get_start_sect(bdev) << 9;

	if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;

		spin_lock_irqsave(t->queue_lock, flags);
		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
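
/*
 * Usage sketch (editor's illustration): an MD/DM style driver adding a
 * component device whose data starts data_offset sectors into the
 * partition would call
 *
 *	disk_stack_limits(md_gendisk, component_bdev, data_offset << 9);
 *
 * (names hypothetical); the shift converts the offset to bytes, and the
 * partition start is added internally via get_start_sect().
 */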

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for
 * appending the drain buffer.  If you call
 * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
 * calling this routine, you must set the limit to one fewer than your
 * device can support otherwise there won't be room for the drain
 * buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size)
{
	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
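
/*
 * Usage sketch (editor's illustration): a hypothetical ATAPI-style driver
 * that must absorb up to DRAIN_SIZE bytes of excess transfer could set
 * this up once per device:
 *
 *	buf = kmalloc(DRAIN_SIZE, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	if (blk_queue_dma_drain(q, my_drain_needed, buf, DRAIN_SIZE) < 0)
 *		kfree(buf);	// queue supports fewer than two segments
 */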

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);
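
/*
 * Usage sketch (editor's illustration): a controller whose scatter/gather
 * entries must not cross a 64KB boundary would call
 *
 *	blk_queue_segment_boundary(q, 0xffff);
 *
 * i.e. the mask is the boundary size minus one.
 */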

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct DMA transactions.
 *    This is used when building direct I/O requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);
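
/*
 * Usage sketch (editor's illustration): a device that requires user
 * buffers for direct I/O to be aligned to 4 bytes would call
 *
 *	blk_queue_dma_alignment(q, 0x3);
 *
 * i.e. the mask is the required alignment minus one; the default set in
 * blk_queue_make_request() is 511 (512-byte alignment).
 */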

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct DMA transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);