/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:		queue
 * @pfn:	prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used, for
 * instance, to build a cdb from the request data.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q:		queue
 * @ufn:	unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 *
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
	q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:		queue
 * @mbfn:	merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limits struct to its default state.  Can be used by
 *   stacking drivers like DM that stage table swaps and reuse an
 *   existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = BLK_DEF_MAX_SECTORS;
	lim->max_hw_sectors = INT_MAX;
	lim->max_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->discard_zeroes_data = -1;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->no_cluster = 0;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = msecs_to_jiffies(3);	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	blk_set_default_limits(&q->limits);
	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);

	/*
	 * If the caller didn't supply a lock, fall back to our embedded
	 * per-queue locks
	 */
	if (!q->queue_lock)
		q->queue_lock = &q->__queue_lock;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
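
/*
 * Usage sketch (hypothetical, not from this file): a bio-based virtual
 * driver in the md/dm style would typically allocate a queue and install
 * its own make_request function instead of relying on a request_fn:
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_make_request(q, mydrv_make_request);
 *
 * where mydrv_make_request() is the driver's own bio handler.
 */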

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
#endif
	q->limits.bounce_pfn = b_pfn;
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
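
/*
 * Usage sketch (hypothetical): a driver whose device can only DMA to the
 * low 4GB of memory would request bouncing for any page above that
 * boundary:
 *
 *	blk_queue_bounce_limit(q, 0xffffffffULL);
 *
 * A fully 64-bit capable device can instead pass BLK_BOUNCE_ANY to opt
 * out of bouncing entirely.
 */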

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the combined capabilities of I/O
 *    controller and storage device.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit cannot exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	q->limits.max_hw_sectors = max_hw_sectors;
	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
				      BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
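
/*
 * Usage sketch (hypothetical): a controller whose DMA engine handles at
 * most 64KB per request would advertise 128 sectors (128 * 512 = 64KB):
 *
 *	blk_queue_max_hw_sectors(q, 128);
 *
 * max_sectors then becomes min(128, BLK_DEF_MAX_SECTORS), keeping the
 * filesystem soft limit within the hardware limit.
 */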

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);
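
/*
 * Usage sketch (hypothetical): a controller with a 32-entry scatter-gather
 * table would cap the number of segments per request accordingly:
 *
 *	blk_queue_max_segments(q, 32);
 */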

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
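
/*
 * Usage sketch (hypothetical): a 4K-native drive that only accepts
 * 4096-byte addressing would report its block size at probe time:
 *
 *	blk_queue_logical_block_size(q, 4096);
 *
 * Note that the helper above also pulls physical_block_size and io_min
 * up to at least this value, keeping the limits internally consistent.
 */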

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
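
/*
 * Usage sketch (hypothetical): a 512-byte emulation drive with 4KB
 * physical blocks, formatted to compensate for the legacy 63-sector DOS
 * partition offset (63 * 512 mod 4096 = 3584), would report:
 *
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_alignment_offset(q, 3584);
 */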

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);
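
/*
 * Usage sketch (hypothetical): a RAID5 driver with a 64KB chunk size
 * would advertise the chunk as the smallest efficient I/O unit:
 *
 *	blk_queue_io_min(q, 65536);
 */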

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
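
/*
 * Usage sketch (hypothetical): continuing the RAID5 example, with four
 * data disks per stripe the full stripe width is the optimal I/O size:
 *
 *	blk_queue_io_opt(q, 4 * 65536);
 */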

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;
		spin_lock_irqsave(t->queue_lock, flags);
		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) & (min(top, bottom) - 1)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm(t->io_opt, b->io_opt);

	t->no_cluster |= b->no_cluster;
	t->discard_zeroes_data &= b->discard_zeroes_data;

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm(t->alignment_offset, alignment)
		& (max(t->physical_block_size, t->io_min) - 1);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if (max(top, bottom) & (min(top, bottom) - 1))
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm(t->discard_alignment, alignment) &
			(t->discard_granularity - 1);
	}

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
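
/*
 * Usage sketch (hypothetical): a stacking driver building a new table
 * would start from defaults and fold in each component device's limits,
 * offset by where that component's data begins:
 *
 *	blk_set_default_limits(&limits);
 *	list_for_each_entry(cd, &component_list, list)
 *		if (blk_stack_limits(&limits, &cd->queue->limits,
 *				     cd->start_sector) < 0)
 *			printk(KERN_NOTICE "%s: misaligned\n", cd->name);
 *
 * where cd, component_list, and start_sector are driver-private and
 * purely illustrative.
 */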

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:	the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;
	struct request_queue *b = bdev_get_queue(bdev);

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}

	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;

		spin_lock_irqsave(t->queue_lock, flags);
		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
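
/*
 * Usage sketch (hypothetical): a dm-style linear target mapping its data
 * from byte offset "start" within an underlying device would call, once
 * per component at table load:
 *
 *	disk_stack_limits(md_disk, bdev, start);
 *
 * where md_disk is the top-level gendisk and start is in bytes (the
 * helper shifts it down to sectors before stacking).
 */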

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size)
{
	if (queue_max_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_segments(q, queue_max_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
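
/*
 * Usage sketch (hypothetical): an ATAPI-style driver with a 256-byte
 * drain page could wire this up once at queue initialization:
 *
 *	mydrv->drain_buf = kmalloc(256, GFP_KERNEL);
 *	if (!mydrv->drain_buf)
 *		return -ENOMEM;
 *	ret = blk_queue_dma_drain(q, mydrv_drain_needed,
 *				  mydrv->drain_buf, 256);
 *
 * where mydrv_drain_needed() returns non-zero for packet commands that
 * may transfer more data than requested.
 */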

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
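
/*
 * Usage sketch (hypothetical): a transport that needs 4-byte alignment
 * on top of whatever the LLD already configured would call:
 *
 *	blk_queue_update_dma_alignment(q, 3);
 *
 * The mask is only ever raised, so a stricter earlier setting (e.g. the
 * 511 default from blk_queue_make_request()) is preserved.
 */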

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);