/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:		queue
 * @pfn:	prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O, it can be used to build a
 * cdb from the request data for instance.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);
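/*
 * Usage sketch (illustrative only, not part of the original file): a
 * driver might install a prep function that builds a command block for
 * each request before it reaches the request_fn.  The names
 * example_driver_prep and example_build_cdb below are hypothetical.
 *
 *	static int example_driver_prep(struct request_queue *q,
 *				       struct request *rq)
 *	{
 *		if (!example_build_cdb(rq))
 *			return BLKPREP_KILL;
 *		return BLKPREP_OK;
 *	}
 *
 *	blk_queue_prep_rq(q, example_driver_prep);
 */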

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:		queue
 * @mbfn:	merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bio's sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bio's. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits struct to its default state.  Can be used
 *   by stacking drivers like DM that stage table swaps and reuse an
 *   existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = BLK_DEF_MAX_SECTORS;
	lim->max_hw_sectors = INT_MAX;
	lim->max_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->discard_zeroes_data = -1;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->no_cluster = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = msecs_to_jiffies(3);	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	blk_set_default_limits(&q->limits);
	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);

	/*
	 * If the caller didn't supply a lock, fall back to our embedded
	 * per-queue locks
	 */
	if (!q->queue_lock)
		q->queue_lock = &q->__queue_lock;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
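/*
 * Usage sketch (illustrative only): a bio-based virtual driver such as a
 * RAID or mapper target typically allocates its own queue and installs
 * its make_request function; example_make_request below is a
 * hypothetical name.
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_make_request(q, example_make_request);
 */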

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max_low_pfn;
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
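/*
 * Usage sketch (illustrative only): a controller that can only DMA to
 * 32-bit addresses could ask for bouncing of anything above 4GB,
 * assuming DMA_BIT_MASK() from <linux/dma-mapping.h>:
 *
 *	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 */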

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the combined capabilities of I/O
 *    controller and storage device.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	q->limits.max_hw_sectors = max_hw_sectors;
	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
				      BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
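/*
 * Usage sketch (illustrative only): a controller whose firmware caps a
 * single transfer at 1MB would advertise 2048 sectors of 512 bytes:
 *
 *	blk_queue_max_hw_sectors(q, 2048);
 */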

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
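/*
 * Usage sketch (illustrative only): a device that can only address
 * 4096-byte sectors would report that as its logical block size; the
 * physical block size and io_min are bumped to match automatically:
 *
 *	blk_queue_logical_block_size(q, 4096);
 */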

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
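/*
 * Usage sketch (illustrative only): a RAID set with a 64KB chunk and
 * four data disks might report the chunk size as the minimum I/O and the
 * full stripe width as the optimal I/O:
 *
 *	blk_queue_io_min(q, 64 * 1024);
 *	blk_queue_io_opt(q, 4 * 64 * 1024);
 */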

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;
		spin_lock_irqsave(t->queue_lock, flags);
		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) & (min(top, bottom) - 1)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm(t->io_opt, b->io_opt);

	t->no_cluster |= b->no_cluster;
	t->discard_zeroes_data &= b->discard_zeroes_data;

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm(t->alignment_offset, alignment)
		& (max(t->physical_block_size, t->io_min) - 1);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if (max(top, bottom) & (min(top, bottom) - 1))
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm(t->discard_alignment, alignment) &
			(t->discard_granularity - 1);
	}

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:	the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;
	struct request_queue *b = bdev_get_queue(bdev);

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;

		spin_lock_irqsave(t->queue_lock, flags);
		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
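/*
 * Usage sketch (illustrative only, hypothetical names): a stacking
 * driver calls this for each component device while assembling the
 * array, so the top-level limits end up compatible with every member.
 * @offset is in bytes, hence the shift from a sector-based start:
 *
 *	disk_stack_limits(array_disk, member_bdev,
 *			  member_start_sector << 9);
 */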

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size)
{
	if (queue_max_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_segments(q, queue_max_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
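/*
 * Usage sketch (illustrative only, hypothetical names): an ATAPI-style
 * driver would reserve a scatterlist slot for a small drain buffer at
 * device setup time:
 *
 *	buf = kmalloc(EXAMPLE_DRAIN_SIZE, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	blk_queue_dma_drain(q, example_drain_needed, buf,
 *			    EXAMPLE_DRAIN_SIZE);
 */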

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);
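/*
 * Usage sketch (illustrative only): hardware whose DMA engine cannot
 * cross a 64KB boundary within a single segment would set:
 *
 *	blk_queue_segment_boundary(q, 0xffffUL);
 */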

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);
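/*
 * Usage sketch (illustrative only): the mask means "buffers must be
 * aligned to (mask + 1) bytes".  A controller requiring 4-byte aligned
 * direct-I/O buffers would use:
 *
 *	blk_queue_dma_alignment(q, 0x3);
 */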

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);