Commit 70d6c400 authored by Mike Snitzer, committed by Alasdair G Kergon

dm kcopyd: add WRITE SAME support to dm_kcopyd_zero

Add WRITE SAME support to dm-io and make it accessible to
dm_kcopyd_zero().  dm_kcopyd_zero() provides an asynchronous interface
whereas the blkdev_issue_write_same() interface is synchronous.

WRITE SAME is a SCSI command that can be leveraged for more efficient
zeroing of a specified logical extent of a device which supports it.
Only a single zeroed logical block is transferred to the target for each
WRITE SAME and the target then writes that same block across the
specified extent.

The dm thin target uses this.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Parent 4f0b70b0
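For illustration only (this code is not part of the commit): a minimal sketch of how a caller might drive the asynchronous dm_kcopyd_zero() interface described above. The names zero_done() and zero_extent() are hypothetical, and kc is assumed to be a dm_kcopyd_client created elsewhere.

#include <linux/blkdev.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/printk.h>

/* Hypothetical completion callback, run from kcopyd's worker context. */
static void zero_done(int read_err, unsigned long write_err, void *context)
{
	if (read_err || write_err)
		pr_err("zeroing failed, write_err=0x%lx\n", write_err);
}

/* Hypothetical helper: submit the zero job and return without waiting. */
static int zero_extent(struct dm_kcopyd_client *kc, struct block_device *bdev,
		       sector_t sector, sector_t count)
{
	struct dm_io_region dest = {
		.bdev   = bdev,
		.sector = sector,
		.count  = count,
	};

	/* Returns immediately; zero_done() runs once all writes complete. */
	return dm_kcopyd_zero(kc, 1, &dest, 0, zero_done, NULL);
}

With this patch applied, such a call is issued as WRITE SAME whenever every destination device advertises support; otherwise kcopyd falls back to ordinary zero-filled writes, as the dm-kcopyd hunk below shows.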
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -287,7 +287,8 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
 	unsigned num_bvecs;
 	sector_t remaining = where->count;
 	struct request_queue *q = bdev_get_queue(where->bdev);
-	sector_t discard_sectors;
+	unsigned short logical_block_size = queue_logical_block_size(q);
+	sector_t num_sectors;
 
 	/*
 	 * where->count may be zero if rw holds a flush and we need to
@@ -297,7 +298,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
 		/*
 		 * Allocate a suitably sized-bio.
 		 */
-		if (rw & REQ_DISCARD)
+		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
 			num_bvecs = 1;
 		else
 			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
@@ -310,9 +311,21 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
 		store_io_and_region_in_bio(bio, io, region);
 
 		if (rw & REQ_DISCARD) {
-			discard_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
-			bio->bi_size = discard_sectors << SECTOR_SHIFT;
-			remaining -= discard_sectors;
+			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
+			bio->bi_size = num_sectors << SECTOR_SHIFT;
+			remaining -= num_sectors;
+		} else if (rw & REQ_WRITE_SAME) {
+			/*
+			 * WRITE SAME only uses a single page.
+			 */
+			dp->get_page(dp, &page, &len, &offset);
+			bio_add_page(bio, page, logical_block_size, offset);
+			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
+			bio->bi_size = num_sectors << SECTOR_SHIFT;
+
+			offset = 0;
+			remaining -= num_sectors;
+			dp->next_page(dp);
 		} else while (remaining) {
 			/*
 			 * Try and add as many pages as possible.
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -349,7 +349,7 @@ static void complete_io(unsigned long error, void *context)
 	struct dm_kcopyd_client *kc = job->kc;
 
 	if (error) {
-		if (job->rw == WRITE)
+		if (job->rw & WRITE)
 			job->write_err |= error;
 		else
 			job->read_err = 1;
@@ -361,7 +361,7 @@ static void complete_io(unsigned long error, void *context)
 		}
 	}
 
-	if (job->rw == WRITE)
+	if (job->rw & WRITE)
 		push(&kc->complete_jobs, job);
 
 	else {
@@ -432,7 +432,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
 
 		if (r < 0) {
 			/* error this rogue job */
-			if (job->rw == WRITE)
+			if (job->rw & WRITE)
 				job->write_err = (unsigned long) -1L;
 			else
 				job->read_err = 1;
@@ -585,6 +585,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
 		   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
 {
 	struct kcopyd_job *job;
+	int i;
 
 	/*
 	 * Allocate an array of jobs consisting of one master job
@@ -611,7 +612,16 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
 		memset(&job->source, 0, sizeof job->source);
 		job->source.count = job->dests[0].count;
 		job->pages = &zero_page_list;
-		job->rw = WRITE;
+
+		/*
+		 * Use WRITE SAME to optimize zeroing if all dests support it.
+		 */
+		job->rw = WRITE | REQ_WRITE_SAME;
+		for (i = 0; i < job->num_dests; i++)
+			if (!bdev_write_same(job->dests[i].bdev)) {
+				job->rw = WRITE;
+				break;
+			}
 	}
 
 	job->fn = fn;
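For contrast with the asynchronous kcopyd path, a hedged sketch of the synchronous route mentioned in the commit message. bdev_write_same() (the same check used in the hunk above) returns 0 when the device does not advertise WRITE SAME, and blkdev_issue_write_same() blocks until the command completes; the helper name sync_zero_extent() is hypothetical.

#include <linux/blkdev.h>
#include <linux/mm.h>

/* Hypothetical helper: synchronously zero an extent via WRITE SAME. */
static int sync_zero_extent(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects)
{
	/* bdev_write_same() reports max_write_same_sectors; 0 means unsupported. */
	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Blocks until the device has written the zeroed block across the extent. */
	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_NOIO,
				       ZERO_PAGE(0));
}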
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2779,7 +2779,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type thin_target = {
 	.name = "thin",
-	.version = {1, 5, 0},
+	.version = {1, 6, 0},
 	.module = THIS_MODULE,
 	.ctr = thin_ctr,
 	.dtr = thin_dtr,