Commit defd94b7, authored by Mike Christie, committed by James Bottomley

[SCSI] separate max_sectors from max_hw_sectors

- export __blk_put_request and blk_execute_rq_nowait
needed for async REQ_BLOCK_PC requests
- separate max_hw_sectors and max_sectors for block/scsi_ioctl.c and
SG_IO bio.c helpers per Jens's last comments. Since block/scsi_ioctl.c SG_IO was
already testing against max_sectors, and SCSI-ml was setting max_sectors and
max_hw_sectors to the same value, this does not change any SCSI SG_IO behavior. It only
prepares ll_rw_blk.c, scsi_ioctl.c and bio.c for when SCSI-ml begins to set
a valid max_hw_sectors for all LLDs. Today, if an LLD does not set it,
SCSI-ml sets it to a safe default, and some LLDs set it to an artificially low
value to work around memory and feedback issues.

Note: Since we now cap max_sectors to BLK_DEF_MAX_SECTORS, which is 1024,
drivers that used to call blk_queue_max_sectors with a large value of
max_sectors will now see the fs requests capped to BLK_DEF_MAX_SECTORS.
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Parent 8b05b773
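To illustrate the split described in the commit message, here is a minimal user-space sketch (not part of the patch; struct and function names are simplified stand-ins for the kernel's request_queue fields) of the capping rule the reworked blk_queue_max_sectors() applies: the driver-supplied limit becomes max_hw_sectors, used by the REQ_BLOCK_PC / SG_IO paths, while max_sectors, used for filesystem requests, is capped at BLK_DEF_MAX_SECTORS.

/*
 * Sketch only: simplified stand-ins for kernel structures, assuming the
 * decision logic shown in the blk_queue_max_sectors() hunk below.
 */
#include <stdio.h>

#define SAFE_MAX_SECTORS    255     /* default when an LLD sets nothing   */
#define BLK_DEF_MAX_SECTORS 1024    /* cap applied to filesystem requests */

struct queue_limits {
	unsigned short max_sectors;     /* limit for fs requests           */
	unsigned short max_hw_sectors;  /* limit for REQ_BLOCK_PC requests */
};

/* Same decision the patched blk_queue_max_sectors() makes. */
static void set_max_sectors(struct queue_limits *q, unsigned short max_sectors)
{
	if (BLK_DEF_MAX_SECTORS > max_sectors) {
		q->max_hw_sectors = q->max_sectors = max_sectors;
	} else {
		q->max_sectors = BLK_DEF_MAX_SECTORS;
		q->max_hw_sectors = max_sectors;
	}
}

int main(void)
{
	struct queue_limits q;

	/* An LLD advertising a large transfer size keeps it for SG_IO,
	 * but fs requests are capped at BLK_DEF_MAX_SECTORS. */
	set_max_sectors(&q, 8192);
	printf("large LLD: fs=%u, hw=%u\n", q.max_sectors, q.max_hw_sectors);
	/* prints: large LLD: fs=1024, hw=8192 */

	/* The safe default used by blk_queue_make_request() is unchanged. */
	set_max_sectors(&q, SAFE_MAX_SECTORS);
	printf("default:   fs=%u, hw=%u\n", q.max_sectors, q.max_hw_sectors);
	/* prints: default:   fs=255, hw=255 */
	return 0;
}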
@@ -239,7 +239,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
-	blk_queue_max_sectors(q, MAX_SECTORS);
+	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
 	blk_queue_hardsect_size(q, 512);
 	blk_queue_dma_alignment(q, 511);
 	blk_queue_congestion_threshold(q);
@@ -555,7 +555,12 @@ void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
 		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
 	}
 
-	q->max_sectors = q->max_hw_sectors = max_sectors;
+	if (BLK_DEF_MAX_SECTORS > max_sectors)
+		q->max_hw_sectors = q->max_sectors = max_sectors;
+	else {
+		q->max_sectors = BLK_DEF_MAX_SECTORS;
+		q->max_hw_sectors = max_sectors;
+	}
 }
 
 EXPORT_SYMBOL(blk_queue_max_sectors);
@@ -657,8 +662,8 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
 {
 	/* zero is "infinity" */
-	t->max_sectors = t->max_hw_sectors =
-		min_not_zero(t->max_sectors,b->max_sectors);
+	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
 
 	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
 	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
@@ -1293,9 +1298,15 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 			    struct bio *bio)
 {
+	unsigned short max_sectors;
 	int len;
 
-	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+	if (unlikely(blk_pc_request(req)))
+		max_sectors = q->max_hw_sectors;
+	else
+		max_sectors = q->max_sectors;
+
+	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
 		req->flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -1325,9 +1336,16 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 			     struct bio *bio)
 {
+	unsigned short max_sectors;
 	int len;
 
-	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+	if (unlikely(blk_pc_request(req)))
+		max_sectors = q->max_hw_sectors;
+	else
+		max_sectors = q->max_sectors;
+
+
+	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
 		req->flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -2144,7 +2162,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 	struct bio *bio;
 	int reading;
 
-	if (len > (q->max_sectors << 9))
+	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len || !ubuf)
 		return -EINVAL;
@@ -2259,7 +2277,7 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
 {
 	struct bio *bio;
 
-	if (len > (q->max_sectors << 9))
+	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len || !kbuf)
 		return -EINVAL;
...
@@ -233,7 +233,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (verify_command(file, cmd))
 		return -EPERM;
 
-	if (hdr->dxfer_len > (q->max_sectors << 9))
+	if (hdr->dxfer_len > (q->max_hw_sectors << 9))
 		return -EIO;
 
 	if (hdr->dxfer_len)
...
@@ -638,7 +638,7 @@ int dm_split_args(int *argc, char ***argvp, char *input)
 static void check_for_valid_limits(struct io_restrictions *rs)
 {
 	if (!rs->max_sectors)
-		rs->max_sectors = MAX_SECTORS;
+		rs->max_sectors = SAFE_MAX_SECTORS;
 	if (!rs->max_phys_segments)
 		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
 	if (!rs->max_hw_segments)
...
@@ -462,6 +462,7 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
 	req = blk_get_request(sdev->request_queue, write, gfp);
 	if (!req)
 		goto free_sense;
+	req->flags |= REQ_BLOCK_PC | REQ_QUIET;
 
 	if (use_sg)
 		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
@@ -477,7 +478,6 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
 	req->sense_len = 0;
 	req->timeout = timeout;
 	req->retries = retries;
-	req->flags |= REQ_BLOCK_PC | REQ_QUIET;
 	req->end_io_data = sioc;
 
 	sioc->data = privdata;
...
@@ -313,7 +313,8 @@ int bio_get_nr_vecs(struct block_device *bdev)
 }
 
 static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
-			  *page, unsigned int len, unsigned int offset)
+			  *page, unsigned int len, unsigned int offset,
+			  unsigned short max_sectors)
 {
 	int retried_segments = 0;
 	struct bio_vec *bvec;
@@ -327,7 +328,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 	if (bio->bi_vcnt >= bio->bi_max_vecs)
 		return 0;
 
-	if (((bio->bi_size + len) >> 9) > q->max_sectors)
+	if (((bio->bi_size + len) >> 9) > max_sectors)
 		return 0;
 
 	/*
@@ -401,7 +402,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
 		    unsigned int len, unsigned int offset)
 {
-	return __bio_add_page(q, bio, page, len, offset);
+	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
 }
 
 /**
@@ -420,8 +421,8 @@ int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
 int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 		 unsigned int offset)
 {
-	return __bio_add_page(bdev_get_queue(bio->bi_bdev), bio, page,
-			      len, offset);
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
 }
 
 struct bio_map_data {
@@ -533,7 +534,7 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
 			break;
 		}
 
-		if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
+		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
 			ret = -EINVAL;
 			break;
 		}
@@ -647,7 +648,8 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 			/*
 			 * sorry...
 			 */
-			if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
+			    bytes)
 				break;
 
 			len -= bytes;
@@ -820,8 +822,8 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
 		if (bytes > len)
 			bytes = len;
 
-		if (__bio_add_page(q, bio, virt_to_page(data), bytes,
-				   offset) < bytes)
+		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
+				    offset) < bytes)
 			break;
 
 		data += bytes;
...
@@ -702,7 +702,8 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define MAX_PHYS_SEGMENTS 128
 #define MAX_HW_SEGMENTS 128
-#define MAX_SECTORS 255
+#define SAFE_MAX_SECTORS 255
+#define BLK_DEF_MAX_SECTORS 1024
 
 #define MAX_SEGMENT_SIZE 65536
...