提交 5cb8850c 编写于 作者: K Kent Overstreet 提交者: Jens Axboe

block: Explicitly handle discard/write same segments

Immutable biovecs changed the way biovecs are interpreted - drivers no
longer use bi_vcnt, they have to go by bi_iter.bi_size (to allow for
using part of an existing segment without modifying it).

This breaks with discards and write_same bios, since for those bi_size
has nothing to do with segments in the biovec. So for now, we need a
fairly gross hack - we fortunately know that there will never be more
than one segment for the entire request, so we can special case
discard/write_same.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Tested-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
上级 08778795
...@@ -21,6 +21,16 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, ...@@ -21,6 +21,16 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
if (!bio) if (!bio)
return 0; return 0;
/*
* This should probably be returning 0, but blk_add_request_payload()
* (Christoph!!!!)
*/
if (bio->bi_rw & REQ_DISCARD)
return 1;
if (bio->bi_rw & REQ_WRITE_SAME)
return 1;
fbio = bio; fbio = bio;
cluster = blk_queue_cluster(q); cluster = blk_queue_cluster(q);
seg_size = 0; seg_size = 0;
...@@ -161,30 +171,60 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, ...@@ -161,30 +171,60 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
*bvprv = *bvec; *bvprv = *bvec;
} }
/* static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
* map a request to scatterlist, return number of sg entries setup. Caller struct scatterlist *sglist,
* must make sure sg can hold rq->nr_phys_segments entries struct scatterlist **sg)
*/
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
struct scatterlist *sglist)
{ {
struct bio_vec bvec, bvprv = { NULL }; struct bio_vec bvec, bvprv = { NULL };
struct req_iterator iter; struct bvec_iter iter;
struct scatterlist *sg;
int nsegs, cluster; int nsegs, cluster;
nsegs = 0; nsegs = 0;
cluster = blk_queue_cluster(q); cluster = blk_queue_cluster(q);
/* if (bio->bi_rw & REQ_DISCARD) {
* for each bio in rq /*
*/ * This is a hack - drivers should be neither modifying the
sg = NULL; * biovec, nor relying on bi_vcnt - but because of
rq_for_each_segment(bvec, rq, iter) { * blk_add_request_payload(), a discard bio may or may not have
__blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg, * a payload we need to set up here (thank you Christoph) and
&nsegs, &cluster); * bi_vcnt is really the only way of telling if we need to.
} /* segments in rq */ */
if (bio->bi_vcnt)
goto single_segment;
return 0;
}
if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
*sg = sglist;
bvec = bio_iovec(bio);
sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
return 1;
}
for_each_bio(bio)
bio_for_each_segment(bvec, bio, iter)
__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
&nsegs, &cluster);
return nsegs;
}
/*
* map a request to scatterlist, return number of sg entries setup. Caller
* must make sure sg can hold rq->nr_phys_segments entries
*/
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
struct scatterlist *sglist)
{
struct scatterlist *sg = NULL;
int nsegs = 0;
if (rq->bio)
nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
if (unlikely(rq->cmd_flags & REQ_COPY_USER) && if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
(blk_rq_bytes(rq) & q->dma_pad_mask)) { (blk_rq_bytes(rq) & q->dma_pad_mask)) {
...@@ -230,20 +270,13 @@ EXPORT_SYMBOL(blk_rq_map_sg); ...@@ -230,20 +270,13 @@ EXPORT_SYMBOL(blk_rq_map_sg);
int blk_bio_map_sg(struct request_queue *q, struct bio *bio, int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist) struct scatterlist *sglist)
{ {
struct bio_vec bvec, bvprv = { NULL }; struct scatterlist *sg = NULL;
struct scatterlist *sg; int nsegs;
int nsegs, cluster; struct bio *next = bio->bi_next;
struct bvec_iter iter; bio->bi_next = NULL;
nsegs = 0;
cluster = blk_queue_cluster(q);
sg = NULL;
bio_for_each_segment(bvec, bio, iter) {
__blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
&nsegs, &cluster);
} /* segments in bio */
nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
bio->bi_next = next;
if (sg) if (sg)
sg_mark_end(sg); sg_mark_end(sg);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册