Commit 542f9038 authored by Mike Snitzer, committed by Alasdair G Kergon

dm: support non power of two target max_io_len

Remove the restriction that limits a target's specified maximum incoming
I/O size to be a power of 2.

Rename this setting from 'split_io' to the less-ambiguous 'max_io_len'.
Change it from sector_t to uint32_t, which is plenty big enough, and
introduce a wrapper function dm_set_target_max_io_len() to set it.
Use sector_div() to process it now that it is not necessarily a power of 2.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Parent 1df05483
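A quick way to see what the new splitting math does: once max_io_len is no longer required to be a power of two, the number of sectors remaining before the next target-internal boundary is found with a division (sector_div() in the kernel) rather than a bit mask, keeping the cheap mask for the power-of-two case. The standalone user-space sketch below mirrors the logic added to max_io_len() in dm.c; the helper name sectors_to_boundary and the sample values are illustrative only, not part of the patch.

/*
 * Sketch of the boundary math used by the reworked max_io_len():
 * given an offset into the target and the target's max_io_len,
 * return how many sectors remain before the next max_io_len boundary.
 * (The kernel only does this when ti->max_io_len is non-zero.)
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t sectors_to_boundary(uint64_t offset, uint32_t max_io_len)
{
        uint64_t done;

        if (max_io_len & (max_io_len - 1))
                done = offset % max_io_len;       /* not a power of 2: sector_div() in the kernel */
        else
                done = offset & (max_io_len - 1); /* power of 2: cheap mask */

        return max_io_len - done;
}

int main(void)
{
        /* e.g. a 24-sector chunk (not a power of 2), I/O starting at sector 100 of the target */
        printf("%llu\n", (unsigned long long)sectors_to_boundary(100, 24)); /* prints 20: next boundary is 120 */
        return 0;
}

An I/O of, say, 64 sectors at that offset would therefore be clipped to 20 sectors so that it does not cross the 120-sector boundary.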
@@ -353,6 +353,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
 {
     unsigned i, rebuild_cnt = 0;
     unsigned long value, region_size = 0;
+    sector_t max_io_len;
     char *key;
 
     /*
@@ -522,14 +523,12 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
         return -EINVAL;
 
     if (rs->md.chunk_sectors)
-        rs->ti->split_io = rs->md.chunk_sectors;
+        max_io_len = rs->md.chunk_sectors;
     else
-        rs->ti->split_io = region_size;
+        max_io_len = region_size;
 
-    if (rs->md.chunk_sectors)
-        rs->ti->split_io = rs->md.chunk_sectors;
-    else
-        rs->ti->split_io = region_size;
+    if (dm_set_target_max_io_len(rs->ti, max_io_len))
+        return -EINVAL;
 
     /* Assume there are no metadata devices until the drives are parsed */
     rs->md.persistent = 0;
......
@@ -1081,7 +1081,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
     }
 
     ti->private = ms;
-    ti->split_io = dm_rh_get_region_size(ms->rh);
+
+    r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
+    if (r)
+        goto err_free_context;
+
     ti->num_flush_requests = 1;
     ti->num_discard_requests = 1;
     ti->discard_zeroes_data_unsupported = 1;
......
@@ -691,7 +691,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
  * Return a minimum chunk size of all snapshots that have the specified origin.
  * Return zero if the origin has no snapshots.
  */
-static sector_t __minimum_chunk_size(struct origin *o)
+static uint32_t __minimum_chunk_size(struct origin *o)
 {
     struct dm_snapshot *snap;
     unsigned chunk_size = 0;
@@ -701,7 +701,7 @@ static sector_t __minimum_chunk_size(struct origin *o)
         chunk_size = min_not_zero(chunk_size,
                                   snap->store->chunk_size);
 
-    return chunk_size;
+    return (uint32_t) chunk_size;
 }
 
 /*
@@ -1172,7 +1172,10 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
         ti->error = "Chunk size not set";
         goto bad_read_metadata;
     }
-    ti->split_io = s->store->chunk_size;
+
+    r = dm_set_target_max_io_len(ti, s->store->chunk_size);
+    if (r)
+        goto bad_read_metadata;
 
     return 0;
@@ -1239,7 +1242,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src,
     snap_dest->store->snap = snap_dest;
     snap_src->store->snap = snap_src;
 
-    snap_dest->ti->split_io = snap_dest->store->chunk_size;
+    snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
     snap_dest->valid = snap_src->valid;
 
     /*
@@ -1817,9 +1820,9 @@ static void snapshot_resume(struct dm_target *ti)
     up_write(&s->lock);
 }
 
-static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
+static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
 {
-    sector_t min_chunksize;
+    uint32_t min_chunksize;
 
     down_read(&_origins_lock);
     min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
@@ -1838,9 +1841,9 @@ static void snapshot_merge_resume(struct dm_target *ti)
     snapshot_resume(ti);
 
     /*
-     * snapshot-merge acts as an origin, so set ti->split_io
+     * snapshot-merge acts as an origin, so set ti->max_io_len
     */
-    ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
+    ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);
 
     start_merge(s);
 }
@@ -2073,12 +2076,12 @@ static int origin_write_extent(struct dm_snapshot *merging_snap,
     struct origin *o;
 
     /*
-     * The origin's __minimum_chunk_size() got stored in split_io
+     * The origin's __minimum_chunk_size() got stored in max_io_len
      * by snapshot_merge_resume().
      */
     down_read(&_origins_lock);
     o = __lookup_origin(merging_snap->origin->bdev);
-    for (n = 0; n < size; n += merging_snap->ti->split_io)
+    for (n = 0; n < size; n += merging_snap->ti->max_io_len)
         if (__origin_write(&o->snapshots, sector + n, NULL) ==
             DM_MAPIO_SUBMITTED)
             must_wait = 1;
@@ -2138,14 +2141,14 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
 }
 
 /*
- * Set the target "split_io" field to the minimum of all the snapshots'
+ * Set the target "max_io_len" field to the minimum of all the snapshots'
  * chunk sizes.
  */
 static void origin_resume(struct dm_target *ti)
 {
     struct dm_dev *dev = ti->private;
 
-    ti->split_io = get_origin_minimum_chunksize(dev->bdev);
+    ti->max_io_len = get_origin_minimum_chunksize(dev->bdev);
 }
 
 static int origin_status(struct dm_target *ti, status_type_t type, char *result,
......
@@ -165,7 +165,10 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
     else
         sc->stripes_shift = __ffs(stripes);
 
-    ti->split_io = chunk_size;
+    r = dm_set_target_max_io_len(ti, chunk_size);
+    if (r)
+        return r;
+
     ti->num_flush_requests = stripes;
     ti->num_discard_requests = stripes;
......
@@ -2628,7 +2628,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
         goto bad_thin_open;
     }
 
-    ti->split_io = tc->pool->sectors_per_block;
+    r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
+    if (r)
+        goto bad_thin_open;
+
     ti->num_flush_requests = 1;
 
     /* In case the pool supports discards, pass them on. */
......
@@ -968,22 +968,41 @@ static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti
 static sector_t max_io_len(sector_t sector, struct dm_target *ti)
 {
     sector_t len = max_io_len_target_boundary(sector, ti);
+    sector_t offset, max_len;
 
     /*
-     * Does the target need to split even further ?
+     * Does the target need to split even further?
      */
-    if (ti->split_io) {
-        sector_t boundary;
-        sector_t offset = dm_target_offset(ti, sector);
-        boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
-                   - offset;
-        if (len > boundary)
-            len = boundary;
+    if (ti->max_io_len) {
+        offset = dm_target_offset(ti, sector);
+        if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
+            max_len = sector_div(offset, ti->max_io_len);
+        else
+            max_len = offset & (ti->max_io_len - 1);
+        max_len = ti->max_io_len - max_len;
+
+        if (len > max_len)
+            len = max_len;
     }
 
     return len;
 }
 
+int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
+{
+    if (len > UINT_MAX) {
+        DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
+              (unsigned long long)len, UINT_MAX);
+        ti->error = "Maximum size of target IO is too large";
+        return -EINVAL;
+    }
+
+    ti->max_io_len = (uint32_t) len;
+
+    return 0;
+}
+EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
+
 static void __map_bio(struct dm_target *ti, struct bio *clone,
                       struct dm_target_io *tio)
 {
......
@@ -186,8 +186,8 @@ struct dm_target {
     sector_t begin;
     sector_t len;
 
-    /* Always a power of 2 */
-    sector_t split_io;
+    /* If non-zero, maximum size of I/O submitted to a target. */
+    uint32_t max_io_len;
 
     /*
      * A number of zero-length barrier requests that will be submitted
@@ -357,6 +357,11 @@ void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callback
  */
 int dm_table_complete(struct dm_table *t);
 
+/*
+ * Target may require that it is never sent I/O larger than len.
+ */
+int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
+
 /*
  * Table reference counting.
  */
......
@@ -268,8 +268,8 @@ enum {
 #define DM_VERSION_MAJOR 4
 #define DM_VERSION_MINOR 22
-#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2011-10-19)"
+#define DM_VERSION_PATCHLEVEL 1
+#define DM_VERSION_EXTRA "-ioctl (2012-06-01)"
 
 /* Status bits */
 #define DM_READONLY_FLAG (1 << 0) /* In/Out */
......