Commit 8f0009a2 authored by Milan Broz, committed by Mike Snitzer

dm crypt: optionally support larger encryption sector size

Add an optional "sector_size" parameter that specifies the encryption
sector size (the atomic unit of block device encryption).

The parameter can be in the range 512 - 4096 bytes and must be a power of
two. For compatibility reasons, the maximal IO must still fit into the page
limit, so the limit is set to the minimal possible page size (4096 bytes).
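
The check below is a minimal stand-alone sketch of that constraint
(crypt_sector_size_valid and MAX_SECTOR_SIZE are hypothetical names, not
dm-crypt code); it only illustrates the 512 - 4096 byte, power-of-two rule:

  /* Sketch of the sector_size constraint: 512 <= size <= 4096, power of two. */
  #include <stdbool.h>

  #define SECTOR_SHIFT    9
  #define MAX_SECTOR_SIZE 4096    /* assumed upper bound: the minimal page size */

  static bool crypt_sector_size_valid(unsigned int sector_size)
  {
          if (sector_size < (1u << SECTOR_SHIFT) || sector_size > MAX_SECTOR_SIZE)
                  return false;
          /* power of two: exactly one bit set */
          return (sector_size & (sector_size - 1)) == 0;
  }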

NOTE: this device cannot yet be handled by cryptsetup if this parameter
is set.

The IV for a sector is calculated from the 512-byte sector offset unless
the iv_large_sectors option is used.
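
As a sketch of that numbering (iv_sector_number is a hypothetical helper,
not the kernel code): the IV sector equals the 512-byte sector offset by
default, and is divided by the number of 512-byte sectors per encryption
sector when iv_large_sectors is set. With sector_size 4096, the second
encryption sector starts at 512-byte sector 8, so plain64 would see IV 8 by
default and IV 1 with iv_large_sectors.

  /* Sketch of IV sector numbering; cc_sector is the offset in 512-byte sectors. */
  #include <stdint.h>

  #define SECTOR_SHIFT 9

  static uint64_t iv_sector_number(uint64_t cc_sector, unsigned int sector_size,
                                   int iv_large_sectors)
  {
          if (iv_large_sectors)
                  return cc_sector / (sector_size >> SECTOR_SHIFT);
          return cc_sector;       /* default: count in 512-byte sectors */
  }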

Test script using dmsetup:

  DEV="/dev/sdb"
  DEV_SIZE=$(blockdev --getsz $DEV)
  KEY="9c1185a5c5e9fc54612808977ee8f548b2258d31ddadef707ba62c166051b9e3cd0294c27515f2bccee924e8823ca6e124b8fc3167ed478bca702babe4e130ac"
  BLOCK_SIZE=4096

  # dmsetup create test_crypt --table "0 $DEV_SIZE crypt aes-xts-plain64 $KEY 0 $DEV 0 1 sector_size:$BLOCK_SIZE"
  # dmsetup table --showkeys test_crypt
Signed-off-by: Milan Broz <gmazyland@gmail.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Parent 33d2f09f
@@ -122,6 +122,20 @@ integrity:<bytes>:<type>
     integrity for the encrypted device. The additional space is then
     used for storing authentication tag (and persistent IV if needed).
 
+sector_size:<bytes>
+    Use <bytes> as the encryption unit instead of 512 bytes sectors.
+    This option can be in range 512 - 4096 bytes and must be power of two.
+    Virtual device will announce this size as a minimal IO and logical sector.
+
+iv_large_sectors
+    IV generators will use sector number counted in <sector_size> units
+    instead of default 512 bytes sectors.
+
+    For example, if <sector_size> is 4096 bytes, plain64 IV for the second
+    sector will be 8 (without flag) and 1 if iv_large_sectors is present.
+    The <iv_offset> must be multiple of <sector_size> (in 512 bytes units)
+    if this flag is specified.
+
 Example scripts
 ===============
 LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
@@ -129,6 +129,7 @@ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
 enum cipher_flags {
         CRYPT_MODE_INTEGRITY_AEAD,      /* Use authenticated mode for cihper */
+        CRYPT_IV_LARGE_SECTORS,         /* Calculate IV from sector_size, not 512B sectors */
 };
 
 /*
@@ -171,6 +172,7 @@ struct crypt_config {
         } iv_gen_private;
         sector_t iv_offset;
         unsigned int iv_size;
+        unsigned int sector_size;
 
         /* ESSIV: struct crypto_cipher *essiv_tfm */
         void *iv_private;
@@ -524,6 +526,11 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
 {
         struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
 
+        if (cc->sector_size != (1 << SECTOR_SHIFT)) {
+                ti->error = "Unsupported sector size for LMK";
+                return -EINVAL;
+        }
+
         lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
         if (IS_ERR(lmk->hash_tfm)) {
                 ti->error = "Error initializing LMK hash";
@@ -677,6 +684,11 @@ static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
 {
         struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
 
+        if (cc->sector_size != (1 << SECTOR_SHIFT)) {
+                ti->error = "Unsupported sector size for TCW";
+                return -EINVAL;
+        }
+
         if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
                 ti->error = "Wrong key size for TCW";
                 return -EINVAL;
@@ -1037,15 +1049,20 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
         struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
         struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
         struct dm_crypt_request *dmreq;
-        unsigned int data_len = 1 << SECTOR_SHIFT;
         u8 *iv, *org_iv, *tag_iv, *tag;
         uint64_t *sector;
         int r = 0;
 
         BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
 
+        /* Reject unexpected unaligned bio. */
+        if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
+                return -EIO;
+
         dmreq = dmreq_of_req(cc, req);
         dmreq->iv_sector = ctx->cc_sector;
+        if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
+                sector_div(dmreq->iv_sector, cc->sector_size >> SECTOR_SHIFT);
         dmreq->ctx = ctx;
         *org_tag_of_dmreq(cc, dmreq) = tag_offset;
@@ -1066,13 +1083,13 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
         sg_init_table(dmreq->sg_in, 4);
         sg_set_buf(&dmreq->sg_in[0], sector, sizeof(uint64_t));
         sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
-        sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, data_len, bv_in.bv_offset);
+        sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
         sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
 
         sg_init_table(dmreq->sg_out, 4);
         sg_set_buf(&dmreq->sg_out[0], sector, sizeof(uint64_t));
         sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
-        sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, data_len, bv_out.bv_offset);
+        sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
         sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
 
         if (cc->iv_gen_ops) {
@@ -1094,14 +1111,14 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
         aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
         if (bio_data_dir(ctx->bio_in) == WRITE) {
                 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
-                                       data_len, iv);
+                                       cc->sector_size, iv);
                 r = crypto_aead_encrypt(req);
                 if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
                         memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
                                cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
         } else {
                 aead_request_set_crypt(req, dmreq->sg_in, dmreq->sg_out,
-                                       data_len + cc->integrity_tag_size, iv);
+                                       cc->sector_size + cc->integrity_tag_size, iv);
                 r = crypto_aead_decrypt(req);
         }
@@ -1112,8 +1129,8 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
         if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
                 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
 
-        bio_advance_iter(ctx->bio_in, &ctx->iter_in, data_len);
-        bio_advance_iter(ctx->bio_out, &ctx->iter_out, data_len);
+        bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
+        bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
 
         return r;
 }
@@ -1127,13 +1144,18 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
         struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
         struct scatterlist *sg_in, *sg_out;
         struct dm_crypt_request *dmreq;
-        unsigned int data_len = 1 << SECTOR_SHIFT;
         u8 *iv, *org_iv, *tag_iv;
         uint64_t *sector;
         int r = 0;
 
+        /* Reject unexpected unaligned bio. */
+        if (unlikely(bv_in.bv_offset & (cc->sector_size - 1)))
+                return -EIO;
+
         dmreq = dmreq_of_req(cc, req);
         dmreq->iv_sector = ctx->cc_sector;
+        if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
+                sector_div(dmreq->iv_sector, cc->sector_size >> SECTOR_SHIFT);
         dmreq->ctx = ctx;
         *org_tag_of_dmreq(cc, dmreq) = tag_offset;
@@ -1150,10 +1172,10 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
         sg_out = &dmreq->sg_out[0];
 
         sg_init_table(sg_in, 1);
-        sg_set_page(sg_in, bv_in.bv_page, data_len, bv_in.bv_offset);
+        sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
 
         sg_init_table(sg_out, 1);
-        sg_set_page(sg_out, bv_out.bv_page, data_len, bv_out.bv_offset);
+        sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
 
         if (cc->iv_gen_ops) {
                 /* For READs use IV stored in integrity metadata */
@@ -1171,7 +1193,7 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
                 memcpy(iv, org_iv, cc->iv_size);
         }
 
-        skcipher_request_set_crypt(req, sg_in, sg_out, data_len, iv);
+        skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);
 
         if (bio_data_dir(ctx->bio_in) == WRITE)
                 r = crypto_skcipher_encrypt(req);
@@ -1181,8 +1203,8 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
         if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
                 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
 
-        bio_advance_iter(ctx->bio_in, &ctx->iter_in, data_len);
-        bio_advance_iter(ctx->bio_out, &ctx->iter_out, data_len);
+        bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
+        bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
 
         return r;
 }
@@ -1268,6 +1290,7 @@ static int crypt_convert(struct crypt_config *cc,
                          struct convert_context *ctx)
 {
         unsigned int tag_offset = 0;
+        unsigned int sector_step = cc->sector_size / (1 << SECTOR_SHIFT);
         int r;
 
         atomic_set(&ctx->cc_pending, 1);
@@ -1275,7 +1298,6 @@ static int crypt_convert(struct crypt_config *cc,
         while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
 
                 crypt_alloc_req(cc, ctx);
-
                 atomic_inc(&ctx->cc_pending);
 
                 if (crypt_integrity_aead(cc))
@@ -1298,16 +1320,16 @@ static int crypt_convert(struct crypt_config *cc,
                  */
                 case -EINPROGRESS:
                         ctx->r.req = NULL;
-                        ctx->cc_sector++;
-                        tag_offset++;
+                        ctx->cc_sector += sector_step;
+                        tag_offset += sector_step;
                         continue;
                 /*
                  * The request was already processed (synchronously).
                  */
                 case 0:
                         atomic_dec(&ctx->cc_pending);
-                        ctx->cc_sector++;
-                        tag_offset++;
+                        ctx->cc_sector += sector_step;
+                        tag_offset += sector_step;
                         cond_resched();
                         continue;
                 /*
@@ -2506,10 +2528,11 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
         struct crypt_config *cc = ti->private;
         struct dm_arg_set as;
         static struct dm_arg _args[] = {
-                {0, 3, "Invalid number of feature args"},
+                {0, 6, "Invalid number of feature args"},
         };
         unsigned int opt_params, val;
         const char *opt_string, *sval;
+        char dummy;
         int ret;
 
         /* Optional parameters */
@@ -2552,7 +2575,16 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
                         cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
                         if (!cc->cipher_auth)
                                 return -ENOMEM;
-                } else {
+                } else if (sscanf(opt_string, "sector_size:%u%c", &cc->sector_size, &dummy) == 1) {
+                        if (cc->sector_size < (1 << SECTOR_SHIFT) ||
+                            cc->sector_size > 4096 ||
+                            (1 << ilog2(cc->sector_size) != cc->sector_size)) {
+                                ti->error = "Invalid feature value for sector_size";
+                                return -EINVAL;
+                        }
+                } else if (!strcasecmp(opt_string, "iv_large_sectors"))
+                        set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
+                else {
                         ti->error = "Invalid feature arguments";
                         return -EINVAL;
                 }
@@ -2592,6 +2624,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                 return -ENOMEM;
         }
         cc->key_size = key_size;
+        cc->sector_size = (1 << SECTOR_SHIFT);
 
         ti->private = cc;
@@ -2664,7 +2697,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
         mutex_init(&cc->bio_alloc_lock);
 
         ret = -EINVAL;
-        if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
+        if ((sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) ||
+            (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
                 ti->error = "Invalid iv_offset sector";
                 goto bad;
         }
@@ -2765,6 +2799,16 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
             (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
                 dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
 
+        /*
+         * Ensure that bio is a multiple of internal sector encryption size
+         * and is aligned to this size as defined in IO hints.
+         */
+        if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
+                return -EIO;
+
+        if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
+                return -EIO;
+
         io = dm_per_bio_data(bio, cc->per_bio_data_size);
         crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
@@ -2772,12 +2816,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
                 unsigned tag_len = cc->on_disk_tag_size * bio_sectors(bio);
 
                 if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
-                    unlikely(!(io->integrity_metadata = kmalloc(tag_len,
+                    unlikely(!(io->integrity_metadata = kzalloc(tag_len,
                                 GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
                         if (bio_sectors(bio) > cc->tag_pool_max_sectors)
                                 dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
                         io->integrity_metadata = mempool_alloc(cc->tag_pool, GFP_NOIO);
                         io->integrity_metadata_from_pool = true;
+                        memset(io->integrity_metadata, 0, cc->tag_pool_max_sectors * (1 << SECTOR_SHIFT));
                 }
         }
@@ -2825,6 +2870,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
                 num_feature_args += !!ti->num_discard_bios;
                 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
                 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
+                num_feature_args += (cc->sector_size != (1 << SECTOR_SHIFT)) ? 1 : 0;
+                num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
                 if (cc->on_disk_tag_size)
                         num_feature_args++;
                 if (num_feature_args) {
@@ -2837,6 +2884,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
                                 DMEMIT(" submit_from_crypt_cpus");
                         if (cc->on_disk_tag_size)
                                 DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
+                        if (cc->sector_size != (1 << SECTOR_SHIFT))
+                                DMEMIT(" sector_size:%d", cc->sector_size);
+                        if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
+                                DMEMIT(" iv_large_sectors");
                 }
                 break;
@@ -2926,6 +2977,8 @@ static int crypt_iterate_devices(struct dm_target *ti,
 
 static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
+        struct crypt_config *cc = ti->private;
+
         /*
          * Unfortunate constraint that is required to avoid the potential
          * for exceeding underlying device's max_segments limits -- due to
@@ -2933,11 +2986,17 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
          * bio that are not as physically contiguous as the original bio.
          */
         limits->max_segment_size = PAGE_SIZE;
+
+        if (cc->sector_size != (1 << SECTOR_SHIFT)) {
+                limits->logical_block_size = cc->sector_size;
+                limits->physical_block_size = cc->sector_size;
+                blk_limits_io_min(limits, cc->sector_size);
+        }
 }
 
 static struct target_type crypt_target = {
         .name   = "crypt",
-        .version = {1, 16, 0},
+        .version = {1, 17, 0},
         .module = THIS_MODULE,
         .ctr    = crypt_ctr,
         .dtr    = crypt_dtr,