Commit d2430f2c authored by Xiongfeng Wang, committed by Yang Yingliang

Revert "dm-crypt: Add IV generation templates"

hulk inclusion
category: bugfix
bugzilla: 31797
CVE: NA

--------------------------------

We came across a KASAN double-free issue which seems to be related to
this patch. Let's revert it for now.

This reverts commit 3449c349585d560f37db2fb938347eb37e78bcae.
Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Reviewed-by: ZhangXiaoxu <zhangxiaoxu5@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 6efe2626
@@ -33,60 +33,13 @@
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/geniv.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <linux/rtnetlink.h> /* for struct rtattr and RTA macros only */
#include <keys/user-type.h>
#include <linux/backing-dev.h>
#include <linux/device-mapper.h>
#include <linux/log2.h>
#define DM_MSG_PREFIX "crypt"
struct geniv_ctx;
struct geniv_req_ctx;
/* Sub-request for each of the skcipher_requests for a segment */
struct geniv_subreq {
struct scatterlist sg_in[4];
struct scatterlist sg_out[4];
sector_t iv_sector;
struct geniv_req_ctx *rctx;
union {
struct skcipher_request req;
struct aead_request req_aead;
} r CRYPTO_MINALIGN_ATTR;
};
/* used to iterate over the src scatterlist of the input parent request */
struct scatterlist_iter {
/* current segment to be processed */
unsigned int seg_no;
	/* bytes already processed in the current segment */
unsigned int done;
/* bytes to be processed in the next request */
unsigned int len;
};
/* context of the input parent request */
struct geniv_req_ctx {
struct geniv_subreq *subreq;
bool is_write;
bool is_aead_request;
sector_t cc_sector;
/* array size of src scatterlist of parent request */
unsigned int nents;
struct scatterlist_iter iter;
struct completion restart;
atomic_t req_pending;
u8 *integrity_metadata;
	/* points to the input parent request */
union {
struct skcipher_request *req;
struct aead_request *req_aead;
} r;
};
/*
* context holding the current state of a multi-part conversion
*/
@@ -145,19 +98,6 @@ struct crypt_iv_operations {
struct dm_crypt_request *dmreq);
};
struct crypt_geniv_operations {
int (*ctr)(struct geniv_ctx *ctx);
void (*dtr)(struct geniv_ctx *ctx);
int (*init)(struct geniv_ctx *ctx);
int (*wipe)(struct geniv_ctx *ctx);
int (*generator)(struct geniv_ctx *ctx,
struct geniv_req_ctx *rctx,
struct geniv_subreq *subreq, u8 *iv);
int (*post)(struct geniv_ctx *ctx,
struct geniv_req_ctx *rctx,
struct geniv_subreq *subreq, u8 *iv);
};
struct iv_essiv_private {
struct crypto_shash *hash_tfm;
u8 *salt;
@@ -280,7 +220,6 @@ struct crypt_config {
#define MIN_IOS 64
#define MAX_TAG_SIZE 480
#define POOL_ENTRY_SIZE 512
#define SECTOR_MASK ((1 << SECTOR_SHIFT) - 1)
static DEFINE_SPINLOCK(dm_crypt_clients_lock);
static unsigned dm_crypt_clients_n = 0;
@@ -306,55 +245,6 @@ static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
return cc->cipher_tfm.tfms_aead[0];
}
/* context of geniv tfm */
struct geniv_ctx {
unsigned int tfms_count;
union {
struct crypto_skcipher *tfm;
struct crypto_aead *tfm_aead;
} tfm_child;
union {
struct crypto_skcipher **tfms;
struct crypto_aead **tfms_aead;
} tfms;
char *ivmode;
unsigned int iv_size;
unsigned int iv_start;
unsigned int rctx_start;
sector_t iv_offset;
unsigned short int sector_size;
unsigned char sector_shift;
char *algname;
char *ivopts;
char *cipher;
char *ciphermode;
unsigned long cipher_flags;
const struct crypt_geniv_operations *iv_gen_ops;
union {
struct iv_essiv_private essiv;
struct iv_benbi_private benbi;
struct iv_lmk_private lmk;
struct iv_tcw_private tcw;
} iv_gen_private;
void *iv_private;
mempool_t *subreq_pool;
unsigned int key_size;
unsigned int key_parts; /* independent parts in key buffer */
unsigned int key_extra_size; /* additional keys length */
unsigned int key_mac_size;
unsigned int integrity_tag_size;
unsigned int integrity_iv_size;
unsigned int on_disk_tag_size;
char *msg;
u8 *authenc_key; /* space for keys in authenc() format (if used) */
u8 *key;
};
/*
* Different IV generation algorithms:
*
@@ -748,1662 +638,262 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
return r;
for (i = 0; i < MD5_HASH_WORDS; i++)
__cpu_to_le32s(&md5state.hash[i]);
memcpy(iv, &md5state.hash, cc->iv_size);
return 0;
}
static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
struct scatterlist *sg;
u8 *src;
int r = 0;
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
sg = crypt_get_sg_data(cc, dmreq->sg_in);
src = kmap_atomic(sg_page(sg));
r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
kunmap_atomic(src);
} else
memset(iv, 0, cc->iv_size);
return r;
}
static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
struct scatterlist *sg;
u8 *dst;
int r;
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
return 0;
sg = crypt_get_sg_data(cc, dmreq->sg_out);
dst = kmap_atomic(sg_page(sg));
r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
/* Tweak the first block of plaintext sector */
if (!r)
crypto_xor(dst + sg->offset, iv, cc->iv_size);
kunmap_atomic(dst);
return r;
}
static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
kzfree(tcw->iv_seed);
tcw->iv_seed = NULL;
kzfree(tcw->whitening);
tcw->whitening = NULL;
if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
crypto_free_shash(tcw->crc32_tfm);
tcw->crc32_tfm = NULL;
}
static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
if (cc->sector_size != (1 << SECTOR_SHIFT)) {
ti->error = "Unsupported sector size for TCW";
return -EINVAL;
}
if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
ti->error = "Wrong key size for TCW";
return -EINVAL;
}
tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
if (IS_ERR(tcw->crc32_tfm)) {
ti->error = "Error initializing CRC32 in TCW";
return PTR_ERR(tcw->crc32_tfm);
}
tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
if (!tcw->iv_seed || !tcw->whitening) {
crypt_iv_tcw_dtr(cc);
ti->error = "Error allocating seed storage in TCW";
return -ENOMEM;
}
return 0;
}
static int crypt_iv_tcw_init(struct crypt_config *cc)
{
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
TCW_WHITENING_SIZE);
return 0;
}
static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
memset(tcw->iv_seed, 0, cc->iv_size);
memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
return 0;
}
static int crypt_iv_tcw_whitening(struct crypt_config *cc,
struct dm_crypt_request *dmreq,
u8 *data)
{
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
__le64 sector = cpu_to_le64(dmreq->iv_sector);
u8 buf[TCW_WHITENING_SIZE];
SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
int i, r;
/* xor whitening with sector number */
crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);
/* calculate crc32 for every 32bit part and xor it */
desc->tfm = tcw->crc32_tfm;
desc->flags = 0;
for (i = 0; i < 4; i++) {
r = crypto_shash_init(desc);
if (r)
goto out;
r = crypto_shash_update(desc, &buf[i * 4], 4);
if (r)
goto out;
r = crypto_shash_final(desc, &buf[i * 4]);
if (r)
goto out;
}
crypto_xor(&buf[0], &buf[12], 4);
crypto_xor(&buf[4], &buf[8], 4);
/* apply whitening (8 bytes) to whole sector */
for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
crypto_xor(data + i * 8, buf, 8);
out:
memzero_explicit(buf, sizeof(buf));
return r;
}
static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
struct scatterlist *sg;
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
__le64 sector = cpu_to_le64(dmreq->iv_sector);
u8 *src;
int r = 0;
/* Remove whitening from ciphertext */
if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
sg = crypt_get_sg_data(cc, dmreq->sg_in);
src = kmap_atomic(sg_page(sg));
r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
kunmap_atomic(src);
}
/* Calculate IV */
crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
if (cc->iv_size > 8)
crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
cc->iv_size - 8);
return r;
}
static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
struct scatterlist *sg;
u8 *dst;
int r;
if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
return 0;
/* Apply whitening on ciphertext */
sg = crypt_get_sg_data(cc, dmreq->sg_out);
dst = kmap_atomic(sg_page(sg));
r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
kunmap_atomic(dst);
return r;
}
static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
/* Used only for writes, there must be an additional space to store IV */
get_random_bytes(iv, cc->iv_size);
return 0;
}
static const struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
static const struct crypt_iv_operations crypt_iv_plain64_ops = {
.generator = crypt_iv_plain64_gen
};
static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
.generator = crypt_iv_plain64be_gen
};
static const struct crypt_iv_operations crypt_iv_essiv_ops = {
.ctr = crypt_iv_essiv_ctr,
.dtr = crypt_iv_essiv_dtr,
.init = crypt_iv_essiv_init,
.wipe = crypt_iv_essiv_wipe,
.generator = crypt_iv_essiv_gen
};
static const struct crypt_iv_operations crypt_iv_benbi_ops = {
.ctr = crypt_iv_benbi_ctr,
.dtr = crypt_iv_benbi_dtr,
.generator = crypt_iv_benbi_gen
};
static const struct crypt_iv_operations crypt_iv_null_ops = {
.generator = crypt_iv_null_gen
};
static const struct crypt_iv_operations crypt_iv_lmk_ops = {
.ctr = crypt_iv_lmk_ctr,
.dtr = crypt_iv_lmk_dtr,
.init = crypt_iv_lmk_init,
.wipe = crypt_iv_lmk_wipe,
.generator = crypt_iv_lmk_gen,
.post = crypt_iv_lmk_post
};
static const struct crypt_iv_operations crypt_iv_tcw_ops = {
.ctr = crypt_iv_tcw_ctr,
.dtr = crypt_iv_tcw_dtr,
.init = crypt_iv_tcw_init,
.wipe = crypt_iv_tcw_wipe,
.generator = crypt_iv_tcw_gen,
.post = crypt_iv_tcw_post
};
static struct crypt_iv_operations crypt_iv_random_ops = {
.generator = crypt_iv_random_gen
};
static bool geniv_integrity_aead(struct geniv_ctx *ctx)
{
return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &ctx->cipher_flags);
}
static bool geniv_integrity_hmac(struct geniv_ctx *ctx)
{
return geniv_integrity_aead(ctx) && ctx->key_mac_size;
}
static struct geniv_req_ctx *geniv_skcipher_req_ctx(struct skcipher_request *req)
{
return (void *)PTR_ALIGN((u8 *)skcipher_request_ctx(req), __alignof__(struct geniv_req_ctx));
}
static struct geniv_req_ctx *geniv_aead_req_ctx(struct aead_request *req)
{
return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), __alignof__(struct geniv_req_ctx));
}
static u8 *iv_of_subreq(struct geniv_ctx *ctx, struct geniv_subreq *subreq)
{
if (geniv_integrity_aead(ctx))
return (u8 *)ALIGN((unsigned long)((char *)subreq + ctx->iv_start),
crypto_aead_alignmask(crypto_aead_reqtfm(subreq->rctx->r.req_aead)) + 1);
else
return (u8 *)ALIGN((unsigned long)((char *)subreq + ctx->iv_start),
crypto_skcipher_alignmask(crypto_skcipher_reqtfm(subreq->rctx->r.req)) + 1);
}
static const struct crypt_geniv_operations crypt_geniv_plain_ops;
static const struct crypt_geniv_operations crypt_geniv_plain64_ops;
static const struct crypt_geniv_operations crypt_geniv_essiv_ops;
static const struct crypt_geniv_operations crypt_geniv_benbi_ops;
static const struct crypt_geniv_operations crypt_geniv_null_ops;
static const struct crypt_geniv_operations crypt_geniv_lmk_ops;
static const struct crypt_geniv_operations crypt_geniv_tcw_ops;
static const struct crypt_geniv_operations crypt_geniv_random_ops;
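/*
 * Illustrative sketch only (not code from this patch): a minimal generator
 * wired into struct crypt_geniv_operations, roughly mirroring the classic
 * "plain" IV mode. The names example_geniv_plain_gen/_ops are made up; the
 * real crypt_geniv_plain_ops definition is not shown in this hunk.
 */
static int example_geniv_plain_gen(struct geniv_ctx *ctx,
				   struct geniv_req_ctx *rctx,
				   struct geniv_subreq *subreq, u8 *iv)
{
	/* zero the IV, then place the 32-bit sector number at its start */
	memset(iv, 0, ctx->iv_size);
	*(__le32 *)iv = cpu_to_le32(subreq->iv_sector & 0xffffffff);
	return 0;
}

static const struct crypt_geniv_operations example_geniv_plain_ops = {
	.generator = example_geniv_plain_gen,
};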
static int geniv_init_iv(struct geniv_ctx *ctx)
{
int ret;
DMDEBUG("IV Generation algorithm : %s\n", ctx->ivmode);
if (ctx->ivmode == NULL)
ctx->iv_gen_ops = NULL;
else if (strcmp(ctx->ivmode, "plain") == 0)
ctx->iv_gen_ops = &crypt_geniv_plain_ops;
else if (strcmp(ctx->ivmode, "plain64") == 0)
ctx->iv_gen_ops = &crypt_geniv_plain64_ops;
else if (strcmp(ctx->ivmode, "essiv") == 0)
ctx->iv_gen_ops = &crypt_geniv_essiv_ops;
else if (strcmp(ctx->ivmode, "benbi") == 0)
ctx->iv_gen_ops = &crypt_geniv_benbi_ops;
else if (strcmp(ctx->ivmode, "null") == 0)
ctx->iv_gen_ops = &crypt_geniv_null_ops;
else if (strcmp(ctx->ivmode, "lmk") == 0) {
ctx->iv_gen_ops = &crypt_geniv_lmk_ops;
/*
		 * Versions 2 and 3 are recognised according
		 * to the length of the provided multi-key string.
* If present (version 3), last key is used as IV seed.
* All keys (including IV seed) are always the same size.
*/
if (ctx->key_size % ctx->key_parts) {
ctx->key_parts++;
ctx->key_extra_size = ctx->key_size / ctx->key_parts;
}
} else if (strcmp(ctx->ivmode, "tcw") == 0) {
ctx->iv_gen_ops = &crypt_geniv_tcw_ops;
ctx->key_parts += 2; /* IV + whitening */
ctx->key_extra_size = ctx->iv_size + TCW_WHITENING_SIZE;
} else if (strcmp(ctx->ivmode, "random") == 0) {
ctx->iv_gen_ops = &crypt_geniv_random_ops;
/* Need storage space in integrity fields. */
ctx->integrity_iv_size = ctx->iv_size;
} else {
DMERR("Invalid IV mode %s\n", ctx->ivmode);
return -EINVAL;
}
/* Allocate IV */
if (ctx->iv_gen_ops && ctx->iv_gen_ops->ctr) {
ret = ctx->iv_gen_ops->ctr(ctx);
if (ret < 0) {
DMERR("Error creating IV for %s\n", ctx->ivmode);
return ret;
}
}
/* Initialize IV (set keys for ESSIV etc) */
if (ctx->iv_gen_ops && ctx->iv_gen_ops->init) {
ret = ctx->iv_gen_ops->init(ctx);
if (ret < 0) {
DMERR("Error creating IV for %s\n", ctx->ivmode);
return ret;
}
}
return 0;
}
static void geniv_free_tfms_aead(struct geniv_ctx *ctx)
{
if (!ctx->tfms.tfms_aead)
return;
	if (ctx->tfms.tfms_aead[0] && !IS_ERR(ctx->tfms.tfms_aead[0])) {
crypto_free_aead(ctx->tfms.tfms_aead[0]);
ctx->tfms.tfms_aead[0] = NULL;
}
kfree(ctx->tfms.tfms_aead);
ctx->tfms.tfms_aead = NULL;
}
static void geniv_free_tfms_skcipher(struct geniv_ctx *ctx)
{
unsigned int i;
if (!ctx->tfms.tfms)
return;
for (i = 0; i < ctx->tfms_count; i++)
		if (ctx->tfms.tfms[i] && !IS_ERR(ctx->tfms.tfms[i])) {
crypto_free_skcipher(ctx->tfms.tfms[i]);
ctx->tfms.tfms[i] = NULL;
}
kfree(ctx->tfms.tfms);
ctx->tfms.tfms = NULL;
}
static void geniv_free_tfms(struct geniv_ctx *ctx)
{
if (geniv_integrity_aead(ctx))
geniv_free_tfms_aead(ctx);
else
geniv_free_tfms_skcipher(ctx);
}
static int geniv_alloc_tfms_aead(struct crypto_aead *parent,
struct geniv_ctx *ctx)
{
unsigned int reqsize, align;
ctx->tfms.tfms_aead = kcalloc(1, sizeof(struct crypto_aead *),
GFP_KERNEL);
if (!ctx->tfms.tfms_aead)
return -ENOMEM;
/* First instance is already allocated in geniv_init_tfm */
ctx->tfms.tfms_aead[0] = ctx->tfm_child.tfm_aead;
/* Setup the current cipher's request structure */
align = crypto_aead_alignmask(parent);
align &= ~(crypto_tfm_ctx_alignment() - 1);
reqsize = align + sizeof(struct geniv_req_ctx) +
crypto_aead_reqsize(ctx->tfms.tfms_aead[0]);
crypto_aead_set_reqsize(parent, reqsize);
return 0;
}
/*
* Allocate memory for the underlying cipher algorithm. Ex: cbc(aes)
*/
static int geniv_alloc_tfms_skcipher(struct crypto_skcipher *parent,
struct geniv_ctx *ctx)
{
unsigned int i, reqsize, align, err;
ctx->tfms.tfms = kcalloc(ctx->tfms_count, sizeof(struct crypto_skcipher *),
GFP_KERNEL);
if (!ctx->tfms.tfms)
return -ENOMEM;
/* First instance is already allocated in geniv_init_tfm */
ctx->tfms.tfms[0] = ctx->tfm_child.tfm;
for (i = 1; i < ctx->tfms_count; i++) {
ctx->tfms.tfms[i] = crypto_alloc_skcipher(ctx->ciphermode, 0, 0);
if (IS_ERR(ctx->tfms.tfms[i])) {
err = PTR_ERR(ctx->tfms.tfms[i]);
geniv_free_tfms(ctx);
return err;
}
/* Setup the current cipher's request structure */
align = crypto_skcipher_alignmask(parent);
align &= ~(crypto_tfm_ctx_alignment() - 1);
reqsize = align + sizeof(struct geniv_req_ctx) +
crypto_skcipher_reqsize(ctx->tfms.tfms[i]);
crypto_skcipher_set_reqsize(parent, reqsize);
}
return 0;
}
static unsigned int geniv_authenckey_size(struct geniv_ctx *ctx)
{
return ctx->key_size - ctx->key_extra_size +
RTA_SPACE(sizeof(struct crypto_authenc_key_param));
}
/*
* Initialize the cipher's context with the key, ivmode and other parameters.
* Also allocate IV generation template ciphers and initialize them.
*/
static int geniv_setkey_init(void *parent, struct geniv_key_info *info)
{
struct geniv_ctx *ctx;
int ret;
if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &info->cipher_flags))
ctx = crypto_aead_ctx((struct crypto_aead *)parent);
else
ctx = crypto_skcipher_ctx((struct crypto_skcipher *)parent);
ctx->tfms_count = info->tfms_count;
ctx->key = info->key;
ctx->cipher_flags = info->cipher_flags;
ctx->ivopts = info->ivopts;
ctx->iv_offset = info->iv_offset;
ctx->sector_size = info->sector_size;
ctx->sector_shift = __ffs(ctx->sector_size) - SECTOR_SHIFT;
ctx->key_size = info->key_size;
ctx->key_parts = info->key_parts;
ctx->key_mac_size = info->key_mac_size;
ctx->on_disk_tag_size = info->on_disk_tag_size;
if (geniv_integrity_hmac(ctx)) {
ctx->authenc_key = kmalloc(geniv_authenckey_size(ctx), GFP_KERNEL);
if (!ctx->authenc_key)
return -ENOMEM;
}
if (geniv_integrity_aead(ctx))
ret = geniv_alloc_tfms_aead((struct crypto_aead *)parent, ctx);
else
ret = geniv_alloc_tfms_skcipher((struct crypto_skcipher *)parent, ctx);
if (ret)
return ret;
ret = geniv_init_iv(ctx);
if (geniv_integrity_aead(ctx))
ctx->integrity_tag_size = ctx->on_disk_tag_size - ctx->integrity_iv_size;
return ret;
}
/*
* If AEAD is composed like authenc(hmac(sha256),xts(aes)),
 * the key must, for some reason, be in a special format.
* This function converts cc->key to this special format.
*/
static void crypt_copy_authenckey(char *p, const void *key,
unsigned int enckeylen, unsigned int authkeylen)
{
struct crypto_authenc_key_param *param;
struct rtattr *rta;
rta = (struct rtattr *)p;
param = RTA_DATA(rta);
param->enckeylen = cpu_to_be32(enckeylen);
rta->rta_len = RTA_LENGTH(sizeof(*param));
rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
p += RTA_SPACE(sizeof(*param));
memcpy(p, key + enckeylen, authkeylen);
p += authkeylen;
memcpy(p, key, enckeylen);
}
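/*
 * Hedged usage sketch (not part of this patch): how the authenc() key blob
 * built by crypt_copy_authenckey() would typically be handed to an AEAD.
 * The helper name is invented; 'key' is assumed to hold the encryption key
 * followed by the authentication key, as the copy routine above expects.
 */
static int example_set_authenc_key(struct crypto_aead *aead, const u8 *key,
				   unsigned int enckeylen,
				   unsigned int authkeylen)
{
	unsigned int blob_len = RTA_SPACE(sizeof(struct crypto_authenc_key_param)) +
				authkeylen + enckeylen;
	u8 *blob = kmalloc(blob_len, GFP_KERNEL);
	int ret;

	if (!blob)
		return -ENOMEM;

	/* layout: rtattr header + enckeylen param | auth key | enc key */
	crypt_copy_authenckey((char *)blob, key, enckeylen, authkeylen);
	ret = crypto_aead_setkey(aead, blob, blob_len);
	kzfree(blob);
	return ret;
}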
static int geniv_setkey_tfms_aead(struct crypto_aead *parent, struct geniv_ctx *ctx,
struct geniv_key_info *info)
{
unsigned int key_size;
unsigned int authenc_key_size;
struct crypto_aead *child_aead;
int ret = 0;
/* Ignore extra keys (which are used for IV etc) */
key_size = ctx->key_size - ctx->key_extra_size;
authenc_key_size = key_size + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
child_aead = ctx->tfms.tfms_aead[0];
crypto_aead_clear_flags(child_aead, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(child_aead, crypto_aead_get_flags(parent) & CRYPTO_TFM_REQ_MASK);
if (geniv_integrity_hmac(ctx)) {
if (key_size < ctx->key_mac_size)
return -EINVAL;
crypt_copy_authenckey(ctx->authenc_key, ctx->key, key_size - ctx->key_mac_size,
ctx->key_mac_size);
}
if (geniv_integrity_hmac(ctx))
ret = crypto_aead_setkey(child_aead, ctx->authenc_key, authenc_key_size);
else
ret = crypto_aead_setkey(child_aead, ctx->key, key_size);
if (ret) {
DMERR("Error setting key for tfms[0]\n");
goto out;
}
crypto_aead_set_flags(parent, crypto_aead_get_flags(child_aead) & CRYPTO_TFM_RES_MASK);
out:
if (geniv_integrity_hmac(ctx))
memzero_explicit(ctx->authenc_key, authenc_key_size);
return ret;
}
static int geniv_setkey_tfms_skcipher(struct crypto_skcipher *parent, struct geniv_ctx *ctx,
struct geniv_key_info *info)
{
unsigned int subkey_size;
char *subkey;
struct crypto_skcipher *child;
int ret, i;
/* Ignore extra keys (which are used for IV etc) */
subkey_size = (ctx->key_size - ctx->key_extra_size)
>> ilog2(ctx->tfms_count);
for (i = 0; i < ctx->tfms_count; i++) {
child = ctx->tfms.tfms[i];
crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(child,
crypto_skcipher_get_flags(parent) & CRYPTO_TFM_REQ_MASK);
subkey = ctx->key + (subkey_size) * i;
ret = crypto_skcipher_setkey(child, subkey, subkey_size);
if (ret) {
DMERR("Error setting key for tfms[%d]\n", i);
return ret;
}
crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
}
return 0;
}
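/*
 * Worked example (illustrative, values assumed): with key_size = 128,
 * key_extra_size = 0 and tfms_count = 2, subkey_size is 128 >> ilog2(2) = 64,
 * so child 0 is keyed with ctx->key[0..63] and child 1 with ctx->key[64..127].
 */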
static int geniv_setkey_set(struct geniv_ctx *ctx)
{
if (ctx->iv_gen_ops && ctx->iv_gen_ops->init)
return ctx->iv_gen_ops->init(ctx);
else
return 0;
}
static int geniv_setkey_wipe(struct geniv_ctx *ctx)
{
int ret;
if (ctx->iv_gen_ops && ctx->iv_gen_ops->wipe) {
ret = ctx->iv_gen_ops->wipe(ctx);
if (ret)
return ret;
}
if (geniv_integrity_hmac(ctx))
kzfree(ctx->authenc_key);
return 0;
}
static int geniv_setkey(void *parent, const u8 *key, unsigned int keylen)
{
int err = 0;
struct geniv_ctx *ctx;
struct geniv_key_info *info = (struct geniv_key_info *) key;
if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &info->cipher_flags))
ctx = crypto_aead_ctx((struct crypto_aead *)parent);
else
ctx = crypto_skcipher_ctx((struct crypto_skcipher *)parent);
DMDEBUG("SETKEY Operation : %d\n", info->keyop);
switch (info->keyop) {
case SETKEY_OP_INIT:
err = geniv_setkey_init(parent, info);
break;
case SETKEY_OP_SET:
err = geniv_setkey_set(ctx);
break;
case SETKEY_OP_WIPE:
err = geniv_setkey_wipe(ctx);
break;
}
if (err)
return err;
if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &info->cipher_flags))
return geniv_setkey_tfms_aead((struct crypto_aead *)parent, ctx, info);
else
return geniv_setkey_tfms_skcipher((struct crypto_skcipher *)parent, ctx, info);
}
static int geniv_aead_setkey(struct crypto_aead *parent,
const u8 *key, unsigned int keylen)
{
return geniv_setkey(parent, key, keylen);
}
static int geniv_skcipher_setkey(struct crypto_skcipher *parent,
const u8 *key, unsigned int keylen)
{
return geniv_setkey(parent, key, keylen);
}
static void geniv_async_done(struct crypto_async_request *async_req, int error);
static int geniv_alloc_subreq_aead(struct geniv_ctx *ctx,
struct geniv_req_ctx *rctx,
u32 req_flags)
{
struct aead_request *req;
if (!rctx->subreq) {
rctx->subreq = mempool_alloc(ctx->subreq_pool, GFP_NOIO);
if (!rctx->subreq)
return -ENOMEM;
}
req = &rctx->subreq->r.req_aead;
rctx->subreq->rctx = rctx;
aead_request_set_tfm(req, ctx->tfms.tfms_aead[0]);
aead_request_set_callback(req, req_flags,
geniv_async_done, rctx->subreq);
return 0;
}
/* req_flags: flags from parent request */
static int geniv_alloc_subreq_skcipher(struct geniv_ctx *ctx,
struct geniv_req_ctx *rctx,
u32 req_flags)
{
int key_index;
struct skcipher_request *req;
if (!rctx->subreq) {
rctx->subreq = mempool_alloc(ctx->subreq_pool, GFP_NOIO);
if (!rctx->subreq)
return -ENOMEM;
}
req = &rctx->subreq->r.req;
rctx->subreq->rctx = rctx;
key_index = rctx->cc_sector & (ctx->tfms_count - 1);
skcipher_request_set_tfm(req, ctx->tfms.tfms[key_index]);
skcipher_request_set_callback(req, req_flags,
geniv_async_done, rctx->subreq);
return 0;
}
/*
 * Asynchronous I/O completion callback for each sector in a segment. When all
 * pending I/O has completed, the parent cipher's completion function is called.
*/
static void geniv_async_done(struct crypto_async_request *async_req, int error)
{
struct geniv_subreq *subreq = async_req->data;
struct geniv_req_ctx *rctx = subreq->rctx;
struct skcipher_request *req = NULL;
struct aead_request *req_aead = NULL;
struct geniv_ctx *ctx;
u8 *iv;
if (!rctx->is_aead_request) {
req = rctx->r.req;
ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
} else {
req_aead = rctx->r.req_aead;
ctx = crypto_aead_ctx(crypto_aead_reqtfm(req_aead));
}
/*
* A request from crypto driver backlog is going to be processed now,
* finish the completion and continue in crypt_convert().
* (Callback will be called for the second time for this request.)
*/
if (error == -EINPROGRESS) {
complete(&rctx->restart);
return;
}
iv = iv_of_subreq(ctx, subreq);
if (!error && ctx->iv_gen_ops && ctx->iv_gen_ops->post)
error = ctx->iv_gen_ops->post(ctx, rctx, subreq, iv);
mempool_free(subreq, ctx->subreq_pool);
/*
* req_pending needs to be checked before req->base.complete is called
* as we need 'req_pending' to be equal to 1 to ensure all subrequests
* are processed.
*/
if (atomic_dec_and_test(&rctx->req_pending)) {
/* Call the parent cipher's completion function */
if (!rctx->is_aead_request)
skcipher_request_complete(req, error);
else
aead_request_complete(req_aead, error);
}
}
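/*
 * Worked example of the req_pending accounting (illustrative): the parent
 * request starts with req_pending = 1, geniv_crypt() adds 1 before each
 * sub-request is submitted, and every completion (asynchronously here or
 * synchronously in geniv_crypt()) subtracts 1. The parent is completed
 * only when the final decrement brings the counter back to 0.
 */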
static unsigned int geniv_get_sectors(struct scatterlist *sg1,
struct scatterlist *sg2,
unsigned int segments)
{
unsigned int i, n1, n2;
n1 = n2 = 0;
for (i = 0; i < segments ; i++) {
n1 += sg1[i].length >> SECTOR_SHIFT;
n1 += (sg1[i].length & SECTOR_MASK) ? 1 : 0;
}
for (i = 0; i < segments ; i++) {
n2 += sg2[i].length >> SECTOR_SHIFT;
n2 += (sg2[i].length & SECTOR_MASK) ? 1 : 0;
}
return max(n1, n2);
}
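/*
 * Worked example (illustrative): for a source scatterlist with segment
 * lengths 4096 and 512 bytes, n1 = 8 + 1 = 9 sectors; the destination list
 * is counted the same way and the larger of the two totals is returned.
 */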
/*
 * Iterate the scatterlist of segments to retrieve 512-byte sectors so that a
 * unique IV can be generated for each 512-byte sector. This split may not be
 * necessary, e.g. when these ciphers are modelled in hardware, which can make
 * use of the hardware's IV generation capabilities.
*/
static int geniv_iter_block(void *req_in,
struct geniv_ctx *ctx, struct geniv_req_ctx *rctx)
{
unsigned int rem;
struct scatterlist *src_org, *dst_org;
struct scatterlist *src1, *dst1;
struct scatterlist_iter *iter = &rctx->iter;
if (unlikely(iter->seg_no >= rctx->nents))
return 0;
if (geniv_integrity_aead(ctx)) {
struct aead_request *req_aead = (struct aead_request *)req_in;
src_org = &req_aead->src[0];
dst_org = &req_aead->dst[0];
} else {
struct skcipher_request *req = (struct skcipher_request *)req_in;
src_org = &req->src[0];
dst_org = &req->dst[0];
}
src1 = &src_org[iter->seg_no];
dst1 = &dst_org[iter->seg_no];
iter->done += iter->len;
if (iter->done >= src1->length) {
iter->seg_no++;
if (iter->seg_no >= rctx->nents)
return 0;
src1 = &src_org[iter->seg_no];
dst1 = &dst_org[iter->seg_no];
iter->done = 0;
}
rem = src1->length - iter->done;
iter->len = rem > ctx->sector_size ? ctx->sector_size : rem;
DMDEBUG("segment:(%d/%u), done:%d, rem:%d\n",
iter->seg_no, rctx->nents, iter->done, rem);
return iter->len;
}
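/*
 * Hedged walk-through (illustrative only, not part of this patch): for a
 * single 4096-byte segment with ctx->sector_size == 512, the loop below is
 * expected to see eight 512-byte chunks before geniv_iter_block() returns 0.
 * It assumes rctx->nents and req->src/dst have already been set up.
 */
static void example_iterate_segments(struct geniv_ctx *ctx,
				     struct geniv_req_ctx *rctx,
				     struct skcipher_request *req)
{
	unsigned int bytes;

	rctx->iter.seg_no = 0;
	rctx->iter.done = 0;
	rctx->iter.len = 0;

	while ((bytes = geniv_iter_block(req, ctx, rctx)) != 0) {
		/* each chunk would get its own sub-request and its own IV */
		pr_debug("chunk of %u bytes at offset %u of segment %u\n",
			 bytes, rctx->iter.done, rctx->iter.seg_no);
	}
}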
static u8 *org_iv_of_subreq(struct geniv_ctx *ctx, struct geniv_subreq *subreq)
{
return iv_of_subreq(ctx, subreq) + ctx->iv_size;
}
static uint64_t *org_sector_of_subreq(struct geniv_ctx *ctx, struct geniv_subreq *subreq)
{
u8 *ptr = iv_of_subreq(ctx, subreq) + ctx->iv_size + ctx->iv_size;
return (uint64_t *) ptr;
}
static unsigned int *org_tag_of_subreq(struct geniv_ctx *ctx, struct geniv_subreq *subreq)
{
u8 *ptr = iv_of_subreq(ctx, subreq) + ctx->iv_size +
ctx->iv_size + sizeof(uint64_t);
return (unsigned int *)ptr;
}
static void *tag_from_subreq(struct geniv_ctx *ctx, struct geniv_subreq *subreq)
{
return &subreq->rctx->integrity_metadata[*org_tag_of_subreq(ctx, subreq) *
ctx->on_disk_tag_size];
}
static void *iv_tag_from_subreq(struct geniv_ctx *ctx, struct geniv_subreq *subreq)
{
return tag_from_subreq(ctx, subreq) + ctx->integrity_tag_size;
}
static int geniv_convert_block_aead(struct geniv_ctx *ctx,
struct geniv_req_ctx *rctx,
struct geniv_subreq *subreq,
unsigned int tag_offset)
{
struct scatterlist *sg_in, *sg_out;
u8 *iv, *org_iv, *tag_iv, *tag;
uint64_t *sector;
int r = 0;
struct scatterlist_iter *iter = &rctx->iter;
struct aead_request *req_aead;
struct aead_request *parent_req = rctx->r.req_aead;
BUG_ON(ctx->integrity_iv_size && ctx->integrity_iv_size != ctx->iv_size);
/* Reject unexpected unaligned bio. */
if (unlikely(iter->len & (ctx->sector_size - 1)))
return -EIO;
subreq->iv_sector = rctx->cc_sector;
if (test_bit(CRYPT_IV_LARGE_SECTORS, &ctx->cipher_flags))
subreq->iv_sector >>= ctx->sector_shift;
*org_tag_of_subreq(ctx, subreq) = tag_offset;
sector = org_sector_of_subreq(ctx, subreq);
*sector = cpu_to_le64(rctx->cc_sector - ctx->iv_offset);
iv = iv_of_subreq(ctx, subreq);
org_iv = org_iv_of_subreq(ctx, subreq);
tag = tag_from_subreq(ctx, subreq);
tag_iv = iv_tag_from_subreq(ctx, subreq);
sg_in = subreq->sg_in;
sg_out = subreq->sg_out;
/*
* AEAD request:
* |----- AAD -------|------ DATA -------|-- AUTH TAG --|
* | (authenticated) | (auth+encryption) | |
* | sector_LE | IV | sector in/out | tag in/out |
*/
sg_init_table(sg_in, 4);
sg_set_buf(&sg_in[0], sector, sizeof(uint64_t));
sg_set_buf(&sg_in[1], org_iv, ctx->iv_size);
sg_set_page(&sg_in[2], sg_page(&parent_req->src[iter->seg_no]),
iter->len, parent_req->src[iter->seg_no].offset + iter->done);
sg_set_buf(&sg_in[3], tag, ctx->integrity_tag_size);
sg_init_table(sg_out, 4);
sg_set_buf(&sg_out[0], sector, sizeof(uint64_t));
sg_set_buf(&sg_out[1], org_iv, ctx->iv_size);
sg_set_page(&sg_out[2], sg_page(&parent_req->dst[iter->seg_no]),
iter->len, parent_req->dst[iter->seg_no].offset + iter->done);
sg_set_buf(&sg_out[3], tag, ctx->integrity_tag_size);
if (ctx->iv_gen_ops) {
/* For READs use IV stored in integrity metadata */
if (ctx->integrity_iv_size && !rctx->is_write) {
memcpy(org_iv, tag_iv, ctx->iv_size);
} else {
r = ctx->iv_gen_ops->generator(ctx, rctx, subreq, org_iv);
if (r < 0)
return r;
/* Store generated IV in integrity metadata */
if (ctx->integrity_iv_size)
memcpy(tag_iv, org_iv, ctx->iv_size);
}
/* Working copy of IV, to be modified in crypto API */
memcpy(iv, org_iv, ctx->iv_size);
}
req_aead = &subreq->r.req_aead;
aead_request_set_ad(req_aead, sizeof(uint64_t) + ctx->iv_size);
if (rctx->is_write) {
aead_request_set_crypt(req_aead, subreq->sg_in, subreq->sg_out,
ctx->sector_size, iv);
r = crypto_aead_encrypt(req_aead);
if (ctx->integrity_tag_size + ctx->integrity_iv_size != ctx->on_disk_tag_size)
memset(tag + ctx->integrity_tag_size + ctx->integrity_iv_size, 0,
ctx->on_disk_tag_size - (ctx->integrity_tag_size + ctx->integrity_iv_size));
} else {
aead_request_set_crypt(req_aead, subreq->sg_in, subreq->sg_out,
ctx->sector_size + ctx->integrity_tag_size, iv);
r = crypto_aead_decrypt(req_aead);
}
if (r == -EBADMSG)
DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
(unsigned long long)le64_to_cpu(*sector));
if (!r && ctx->iv_gen_ops && ctx->iv_gen_ops->post)
r = ctx->iv_gen_ops->post(ctx, rctx, subreq, org_iv);
return r;
}
static int geniv_convert_block_skcipher(struct geniv_ctx *ctx,
struct geniv_req_ctx *rctx,
struct geniv_subreq *subreq,
unsigned int tag_offset)
{
struct scatterlist *sg_in, *sg_out;
u8 *iv, *org_iv, *tag_iv;
uint64_t *sector;
int r = 0;
struct scatterlist_iter *iter = &rctx->iter;
struct skcipher_request *req;
struct skcipher_request *parent_req = rctx->r.req;
/* Reject unexpected unaligned bio. */
if (unlikely(iter->len & (ctx->sector_size - 1)))
return -EIO;
subreq->iv_sector = rctx->cc_sector;
if (test_bit(CRYPT_IV_LARGE_SECTORS, &ctx->cipher_flags))
subreq->iv_sector >>= ctx->sector_shift;
*org_tag_of_subreq(ctx, subreq) = tag_offset;
iv = iv_of_subreq(ctx, subreq);
org_iv = org_iv_of_subreq(ctx, subreq);
tag_iv = iv_tag_from_subreq(ctx, subreq);
sector = org_sector_of_subreq(ctx, subreq);
*sector = cpu_to_le64(rctx->cc_sector - ctx->iv_offset);
/* For skcipher we use only the first sg item */
sg_in = subreq->sg_in;
sg_out = subreq->sg_out;
sg_init_table(sg_in, 1);
sg_set_page(sg_in, sg_page(&parent_req->src[iter->seg_no]),
iter->len, parent_req->src[iter->seg_no].offset + iter->done);
sg_init_table(sg_out, 1);
sg_set_page(sg_out, sg_page(&parent_req->dst[iter->seg_no]),
iter->len, parent_req->dst[iter->seg_no].offset + iter->done);
if (ctx->iv_gen_ops) {
/* For READs use IV stored in integrity metadata */
if (ctx->integrity_iv_size && !rctx->is_write) {
memcpy(org_iv, tag_iv, ctx->integrity_iv_size);
} else {
r = ctx->iv_gen_ops->generator(ctx, rctx, subreq, org_iv);
if (r < 0)
return r;
/* Store generated IV in integrity metadata */
if (ctx->integrity_iv_size)
memcpy(tag_iv, org_iv, ctx->integrity_iv_size);
}
/* Working copy of IV, to be modified in crypto API */
memcpy(iv, org_iv, ctx->iv_size);
}
req = &subreq->r.req;
skcipher_request_set_crypt(req, sg_in, sg_out, ctx->sector_size, iv);
if (rctx->is_write)
r = crypto_skcipher_encrypt(req);
else
r = crypto_skcipher_decrypt(req);
if (!r && ctx->iv_gen_ops && ctx->iv_gen_ops->post)
r = ctx->iv_gen_ops->post(ctx, rctx, subreq, org_iv);
return r;
}
/*
 * Common encrypt/decrypt function for the geniv template cipher. Before the
 * crypto operation, it splits the memory segments (in the scatterlist) into
 * 512-byte sectors. The initialization vector (IV) used is based on a unique
 * sector number, which is generated here.
*/
static int geniv_crypt(struct geniv_ctx *ctx, void *parent_req, bool is_encrypt)
{
struct skcipher_request *req = NULL;
struct aead_request *req_aead = NULL;
struct geniv_req_ctx *rctx;
struct geniv_req_info *rinfo;
int i, bytes, cryptlen, ret = 0;
unsigned int sectors;
unsigned int tag_offset = 0;
unsigned int sector_step = ctx->sector_size >> SECTOR_SHIFT;
char *str __maybe_unused = is_encrypt ? "encrypt" : "decrypt";
if (geniv_integrity_aead(ctx)) {
req_aead = (struct aead_request *)parent_req;
rctx = geniv_aead_req_ctx(req_aead);
rctx->r.req_aead = req_aead;
rinfo = (struct geniv_req_info *)req_aead->iv;
} else {
req = (struct skcipher_request *)parent_req;
rctx = geniv_skcipher_req_ctx(req);
rctx->r.req = req;
rinfo = (struct geniv_req_info *)req->iv;
}
/* Instance of 'struct geniv_req_info' is stored in IV ptr */
rctx->is_write = is_encrypt;
rctx->is_aead_request = geniv_integrity_aead(ctx);
rctx->cc_sector = rinfo->cc_sector;
rctx->nents = rinfo->nents;
rctx->integrity_metadata = rinfo->integrity_metadata;
rctx->subreq = NULL;
	if (geniv_integrity_aead(ctx))
		cryptlen = req_aead->cryptlen;
	else
		cryptlen = req->cryptlen;
rctx->iter.seg_no = 0;
rctx->iter.done = 0;
rctx->iter.len = 0;
DMDEBUG("geniv:%s: starting sector=%d, #segments=%u\n", str,
(unsigned int)rctx->cc_sector, rctx->nents);
if (geniv_integrity_aead(ctx))
sectors = geniv_get_sectors(req_aead->src, req_aead->dst, rctx->nents);
else
sectors = geniv_get_sectors(req->src, req->dst, rctx->nents);
init_completion(&rctx->restart);
atomic_set(&rctx->req_pending, 1);
for (i = 0; i < sectors; i++) {
struct geniv_subreq *subreq;
if (geniv_integrity_aead(ctx))
ret = geniv_alloc_subreq_aead(ctx, rctx, req_aead->base.flags);
else
ret = geniv_alloc_subreq_skcipher(ctx, rctx, req->base.flags);
if (ret)
return -ENOMEM;
subreq = rctx->subreq;
if (geniv_integrity_aead(ctx))
bytes = geniv_iter_block(req_aead, ctx, rctx);
else
bytes = geniv_iter_block(req, ctx, rctx);
if (bytes == 0)
break;
cryptlen -= bytes;
atomic_inc(&rctx->req_pending);
if (geniv_integrity_aead(ctx))
ret = geniv_convert_block_aead(ctx, rctx, subreq, tag_offset);
else
ret = geniv_convert_block_skcipher(ctx, rctx, subreq, tag_offset);
switch (ret) {
/*
* The request was queued by a crypto driver
* but the driver request queue is full, let's wait.
*/
case -EBUSY:
wait_for_completion(&rctx->restart);
reinit_completion(&rctx->restart);
/* fall through */
/*
* The request is queued and processed asynchronously,
* completion function geniv_async_done() is called.
*/
case -EINPROGRESS:
			/*
			 * Marking this NULL allows a new sub-request to be
			 * created when 'geniv_alloc_subreq' is called.
			 */
rctx->subreq = NULL;
rctx->cc_sector += sector_step;
tag_offset++;
cond_resched();
break;
/*
* The request was already processed (synchronously).
*/
case 0:
atomic_dec(&rctx->req_pending);
rctx->cc_sector += sector_step;
tag_offset++;
cond_resched();
continue;
/* There was an error while processing the request. */
default:
atomic_dec(&rctx->req_pending);
mempool_free(rctx->subreq, ctx->subreq_pool);
atomic_dec(&rctx->req_pending);
return ret;
}
}
if (rctx->subreq)
mempool_free(rctx->subreq, ctx->subreq_pool);
if (atomic_dec_and_test(&rctx->req_pending))
return 0;
else
return -EINPROGRESS;
}
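/*
 * Hedged usage sketch (not from this patch): how a caller such as dm-crypt
 * would be expected to drive the skcipher side of the template, e.g. a tfm
 * allocated as crypto_alloc_skcipher("plain64(cbc(aes))", 0, 0). The key is
 * assumed to have been set beforehand via a struct geniv_key_info, and
 * asynchronous (-EINPROGRESS/-EBUSY) completion handling is omitted.
 */
static int example_geniv_encrypt(struct crypto_skcipher *tfm,
				 struct scatterlist *src,
				 struct scatterlist *dst,
				 unsigned int nents, unsigned int len,
				 sector_t start_sector, u8 *tag_metadata)
{
	struct skcipher_request *req;
	struct geniv_req_info rinfo;
	int ret;

	req = skcipher_request_alloc(tfm, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	rinfo.cc_sector = start_sector;
	rinfo.nents = nents;
	rinfo.integrity_metadata = tag_metadata;

	/* geniv_crypt() reads struct geniv_req_info through the IV pointer */
	skcipher_request_set_crypt(req, src, dst, len, (u8 *)&rinfo);
	ret = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
	return ret;
}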
static int geniv_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct geniv_ctx *ctx = crypto_skcipher_ctx(tfm);
return geniv_crypt(ctx, req, true);
}
static int geniv_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct geniv_ctx *ctx = crypto_skcipher_ctx(tfm);
return geniv_crypt(ctx, req, false);
}
static int geniv_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct geniv_ctx *ctx = crypto_aead_ctx(tfm);
return geniv_crypt(ctx, req, true);
}
static int geniv_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct geniv_ctx *ctx = crypto_aead_ctx(tfm);
return geniv_crypt(ctx, req, false);
}
/*
* Workaround to parse cipher algorithm from crypto API spec.
* The ctx->cipher is currently used only in ESSIV.
 * This should probably be done by crypto API calls (once available...)
*/
static int geniv_blkdev_cipher(struct geniv_ctx *ctx, bool is_crypto_aead)
{
const char *alg_name = NULL;
char *start, *end;
alg_name = ctx->ciphermode;
if (!alg_name)
return -EINVAL;
if (is_crypto_aead) {
alg_name = strchr(alg_name, ',');
if (!alg_name)
alg_name = ctx->ciphermode;
alg_name++;
}
start = strchr(alg_name, '(');
end = strchr(alg_name, ')');
if (!start && !end) {
ctx->cipher = kstrdup(alg_name, GFP_KERNEL);
return ctx->cipher ? 0 : -ENOMEM;
}
if (!start || !end || ++start >= end)
return -EINVAL;
ctx->cipher = kzalloc(end - start + 1, GFP_KERNEL);
if (!ctx->cipher)
return -ENOMEM;
strncpy(ctx->cipher, start, end - start);
return 0;
}
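/*
 * Illustrative results of the parsing above (assumed examples):
 *   ciphermode "cbc(aes)"                        -> ctx->cipher "aes"
 *   ciphermode "authenc(hmac(sha256),xts(aes))"  -> ctx->cipher "aes" (AEAD)
 *   ciphermode "aes" (no parentheses)            -> ctx->cipher "aes"
 */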
static int geniv_init_tfm(void *tfm_tmp, bool is_crypto_aead)
{
struct geniv_ctx *ctx;
struct crypto_skcipher *tfm;
struct crypto_aead *tfm_aead;
unsigned int reqsize;
size_t iv_size_padding;
char *algname;
int psize, ret;
if (is_crypto_aead) {
tfm_aead = (struct crypto_aead *)tfm_tmp;
ctx = crypto_aead_ctx(tfm_aead);
algname = (char *) crypto_tfm_alg_name(crypto_aead_tfm(tfm_aead));
} else {
tfm = (struct crypto_skcipher *)tfm_tmp;
ctx = crypto_skcipher_ctx(tfm);
algname = (char *) crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
}
ctx->ciphermode = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
if (!ctx->ciphermode)
return -ENOMEM;
ctx->algname = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
if (!ctx->algname) {
ret = -ENOMEM;
goto free_ciphermode;
}
strlcpy(ctx->algname, algname, CRYPTO_MAX_ALG_NAME);
algname = ctx->algname;
/* Parse the algorithm name 'ivmode(ciphermode)' */
ctx->ivmode = strsep(&algname, "(");
strlcpy(ctx->ciphermode, algname, CRYPTO_MAX_ALG_NAME);
ctx->ciphermode[strlen(algname) - 1] = '\0';
DMDEBUG("ciphermode=%s, ivmode=%s\n", ctx->ciphermode, ctx->ivmode);
/*
* Usually the underlying cipher instances are spawned here, but since
* the value of tfms_count (which is equal to the key_count) is not
* known yet, create only one instance and delay the creation of the
* rest of the instances of the underlying cipher 'cbc(aes)' until
* the setkey operation is invoked.
	 * The first instance created here, i.e. ctx->tfm_child, will later be
	 * assigned as the first element of the array ctx->tfms. At least one
	 * instance of the cipher must be created here so that errors are
	 * uncovered earlier than during the later setkey operation, where the
	 * remaining instances are created.
*/
if (is_crypto_aead)
ctx->tfm_child.tfm_aead = crypto_alloc_aead(ctx->ciphermode, 0, 0);
else
ctx->tfm_child.tfm = crypto_alloc_skcipher(ctx->ciphermode, 0, 0);
if (IS_ERR(ctx->tfm_child.tfm)) {
ret = PTR_ERR(ctx->tfm_child.tfm);
DMERR("Failed to create cipher %s. err %d\n",
ctx->ciphermode, ret);
goto free_algname;
}
/* Setup the current cipher's request structure */
if (is_crypto_aead) {
reqsize = sizeof(struct geniv_req_ctx) + __alignof__(struct geniv_req_ctx);
crypto_aead_set_reqsize(tfm_aead, reqsize);
ctx->iv_start = sizeof(struct geniv_subreq);
ctx->iv_start += crypto_aead_reqsize(ctx->tfm_child.tfm_aead);
ctx->iv_size = crypto_aead_ivsize(tfm_aead);
} else {
reqsize = sizeof(struct geniv_req_ctx) + __alignof__(struct geniv_req_ctx);
crypto_skcipher_set_reqsize(tfm, reqsize);
ctx->iv_start = sizeof(struct geniv_subreq);
ctx->iv_start += crypto_skcipher_reqsize(ctx->tfm_child.tfm);
ctx->iv_size = crypto_skcipher_ivsize(tfm);
}
/* at least a 64 bit sector number should fit in our buffer */
if (ctx->iv_size)
ctx->iv_size = max(ctx->iv_size,
(unsigned int)(sizeof(u64) / sizeof(u8)));
if (is_crypto_aead) {
if (crypto_aead_alignmask(tfm_aead) < CRYPTO_MINALIGN) {
/* Allocate the padding exactly */
iv_size_padding = -ctx->iv_start
& crypto_aead_alignmask(ctx->tfm_child.tfm_aead);
} else {
/*
* If the cipher requires greater alignment than kmalloc
* alignment, we don't know the exact position of the
* initialization vector. We must assume worst case.
*/
iv_size_padding = crypto_aead_alignmask(ctx->tfm_child.tfm_aead);
}
} else {
if (crypto_skcipher_alignmask(tfm) < CRYPTO_MINALIGN) {
iv_size_padding = -ctx->iv_start
& crypto_skcipher_alignmask(ctx->tfm_child.tfm);
} else {
iv_size_padding = crypto_skcipher_alignmask(ctx->tfm_child.tfm);
}
}
/*
* create memory pool for sub-request structure
* ...| IV + padding | original IV | original sec. number | bio tag offset |
*/
psize = ctx->iv_start + iv_size_padding + ctx->iv_size + ctx->iv_size +
sizeof(uint64_t) + sizeof(unsigned int);
ctx->subreq_pool = mempool_create_kmalloc_pool(MIN_IOS, psize);
if (!ctx->subreq_pool) {
ret = -ENOMEM;
DMERR("Could not allocate crypt sub-request mempool\n");
goto free_tfm;
}
ret = geniv_blkdev_cipher(ctx, is_crypto_aead);
if (ret < 0) {
ret = -ENOMEM;
DMERR("Cannot allocate cipher string\n");
goto free_tfm;
}
	return 0;
free_tfm:
if (is_crypto_aead)
crypto_free_aead(ctx->tfm_child.tfm_aead);
else
crypto_free_skcipher(ctx->tfm_child.tfm);
free_algname:
kfree(ctx->algname);
free_ciphermode:
kfree(ctx->ciphermode);
return ret;
}
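/*
 * Worked example of one sub-request pool entry (sizes are assumptions for
 * illustration): with iv_start = 160, iv_size_padding = 0 and iv_size = 16,
 * psize = 160 + 0 + 16 + 16 + 8 + 4 = 204 bytes, laid out as
 * | subreq + child req | IV | original IV | original sector (u64) | tag offset |
 */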
static int geniv_skcipher_init_tfm(struct crypto_skcipher *tfm)
static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
return geniv_init_tfm(tfm, 0);
}
struct scatterlist *sg;
u8 *src;
int r = 0;
static int geniv_aead_init_tfm(struct crypto_aead *tfm)
{
return geniv_init_tfm(tfm, 1);
}
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
sg = crypt_get_sg_data(cc, dmreq->sg_in);
src = kmap_atomic(sg_page(sg));
r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
kunmap_atomic(src);
} else
memset(iv, 0, cc->iv_size);
static void geniv_exit_tfm(struct geniv_ctx *ctx)
{
if (ctx->iv_gen_ops && ctx->iv_gen_ops->dtr)
ctx->iv_gen_ops->dtr(ctx);
mempool_destroy(ctx->subreq_pool);
geniv_free_tfms(ctx);
kzfree(ctx->ciphermode);
kzfree(ctx->algname);
kzfree(ctx->cipher);
return r;
}
static void geniv_skcipher_exit_tfm(struct crypto_skcipher *tfm)
static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
struct geniv_ctx *ctx = crypto_skcipher_ctx(tfm);
geniv_exit_tfm(ctx);
}
struct scatterlist *sg;
u8 *dst;
int r;
static void geniv_aead_exit_tfm(struct crypto_aead *tfm)
{
struct geniv_ctx *ctx = crypto_aead_ctx(tfm);
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
return 0;
geniv_exit_tfm(ctx);
}
sg = crypt_get_sg_data(cc, dmreq->sg_out);
dst = kmap_atomic(sg_page(sg));
r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
static void geniv_skcipher_free(struct skcipher_instance *inst)
{
struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
/* Tweak the first block of plaintext sector */
if (!r)
crypto_xor(dst + sg->offset, iv, cc->iv_size);
crypto_drop_skcipher(spawn);
kfree(inst);
kunmap_atomic(dst);
return r;
}
static void geniv_aead_free(struct aead_instance *inst)
static void crypt_iv_tcw_dtr(struct crypt_config *cc)
{
struct crypto_aead_spawn *spawn = aead_instance_ctx(inst);
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
kzfree(tcw->iv_seed);
tcw->iv_seed = NULL;
kzfree(tcw->whitening);
tcw->whitening = NULL;
crypto_drop_aead(spawn);
kfree(inst);
if (tcw->crc32_tfm && !IS_ERR(tcw->crc32_tfm))
crypto_free_shash(tcw->crc32_tfm);
tcw->crc32_tfm = NULL;
}
static int geniv_skcipher_create(struct crypto_template *tmpl,
struct rtattr **tb, char *algname)
static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
struct crypto_attr_type *algt;
struct skcipher_instance *inst;
struct skcipher_alg *alg;
struct crypto_skcipher_spawn *spawn;
const char *cipher_name;
int err;
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
algt = crypto_get_attr_type(tb);
if (cc->sector_size != (1 << SECTOR_SHIFT)) {
ti->error = "Unsupported sector size for TCW";
return -EINVAL;
}
cipher_name = crypto_attr_alg_name(tb[1]);
if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
ti->error = "Wrong key size for TCW";
return -EINVAL;
}
if (IS_ERR(cipher_name))
return PTR_ERR(cipher_name);
tcw->crc32_tfm = crypto_alloc_shash("crc32", 0, 0);
if (IS_ERR(tcw->crc32_tfm)) {
ti->error = "Error initializing CRC32 in TCW";
return PTR_ERR(tcw->crc32_tfm);
}
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
tcw->whitening = kzalloc(TCW_WHITENING_SIZE, GFP_KERNEL);
if (!tcw->iv_seed || !tcw->whitening) {
crypt_iv_tcw_dtr(cc);
ti->error = "Error allocating seed storage in TCW";
return -ENOMEM;
}
spawn = skcipher_instance_ctx(inst);
crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
err = crypto_grab_skcipher(spawn, cipher_name, 0,
crypto_requires_sync(algt->type,
algt->mask));
if (err)
goto err_free_inst;
alg = crypto_spawn_skcipher_alg(spawn);
return 0;
}
err = -EINVAL;
static int crypt_iv_tcw_init(struct crypt_config *cc)
{
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
	/* Only support block sizes that are a power of 2 */
if (!is_power_of_2(alg->base.cra_blocksize))
goto err_drop_spawn;
memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
TCW_WHITENING_SIZE);
/* algname: essiv, base.cra_name: cbc(aes) */
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
algname, alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_spawn;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s(%s)", algname, alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto err_drop_spawn;
return 0;
}
inst->alg.base.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.ivsize = alg->base.cra_blocksize;
inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
inst->alg.min_keysize = sizeof(struct geniv_key_info);
inst->alg.max_keysize = sizeof(struct geniv_key_info);
static int crypt_iv_tcw_wipe(struct crypt_config *cc)
{
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
inst->alg.setkey = geniv_skcipher_setkey;
inst->alg.encrypt = geniv_skcipher_encrypt;
inst->alg.decrypt = geniv_skcipher_decrypt;
memset(tcw->iv_seed, 0, cc->iv_size);
memset(tcw->whitening, 0, TCW_WHITENING_SIZE);
inst->alg.base.cra_ctxsize = sizeof(struct geniv_ctx);
return 0;
}
inst->alg.init = geniv_skcipher_init_tfm;
inst->alg.exit = geniv_skcipher_exit_tfm;
static int crypt_iv_tcw_whitening(struct crypt_config *cc,
struct dm_crypt_request *dmreq,
u8 *data)
{
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
__le64 sector = cpu_to_le64(dmreq->iv_sector);
u8 buf[TCW_WHITENING_SIZE];
SHASH_DESC_ON_STACK(desc, tcw->crc32_tfm);
int i, r;
inst->free = geniv_skcipher_free;
/* xor whitening with sector number */
crypto_xor_cpy(buf, tcw->whitening, (u8 *)&sector, 8);
crypto_xor_cpy(&buf[8], tcw->whitening + 8, (u8 *)&sector, 8);
err = skcipher_register_instance(tmpl, inst);
if (err)
goto err_drop_spawn;
/* calculate crc32 for every 32bit part and xor it */
desc->tfm = tcw->crc32_tfm;
desc->flags = 0;
for (i = 0; i < 4; i++) {
r = crypto_shash_init(desc);
if (r)
goto out;
r = crypto_shash_update(desc, &buf[i * 4], 4);
if (r)
goto out;
r = crypto_shash_final(desc, &buf[i * 4]);
if (r)
goto out;
}
crypto_xor(&buf[0], &buf[12], 4);
crypto_xor(&buf[4], &buf[8], 4);
/* apply whitening (8 bytes) to whole sector */
for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)
crypto_xor(data + i * 8, buf, 8);
out:
return err;
err_drop_spawn:
crypto_drop_skcipher(spawn);
err_free_inst:
kfree(inst);
goto out;
memzero_explicit(buf, sizeof(buf));
return r;
}
static int geniv_aead_create(struct crypto_template *tmpl,
struct rtattr **tb, char *algname)
static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
struct crypto_attr_type *algt;
struct aead_instance *inst;
struct aead_alg *alg;
struct crypto_aead_spawn *spawn;
const char *cipher_name;
int err;
algt = crypto_get_attr_type(tb);
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
return PTR_ERR(cipher_name);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
struct scatterlist *sg;
struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
__le64 sector = cpu_to_le64(dmreq->iv_sector);
u8 *src;
int r = 0;
spawn = aead_instance_ctx(inst);
/* Remove whitening from ciphertext */
if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
sg = crypt_get_sg_data(cc, dmreq->sg_in);
src = kmap_atomic(sg_page(sg));
r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
kunmap_atomic(src);
}
crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
err = crypto_grab_aead(spawn, cipher_name, 0,
crypto_requires_sync(algt->type,
algt->mask));
if (err)
goto err_free_inst;
/* Calculate IV */
crypto_xor_cpy(iv, tcw->iv_seed, (u8 *)&sector, 8);
if (cc->iv_size > 8)
crypto_xor_cpy(&iv[8], tcw->iv_seed + 8, (u8 *)&sector,
cc->iv_size - 8);
alg = crypto_spawn_aead_alg(spawn);
return r;
}
	/* Only support block sizes that are a power of 2 */
if (!is_power_of_2(alg->base.cra_blocksize)) {
err = -EINVAL;
goto err_drop_spawn;
}
static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
struct scatterlist *sg;
u8 *dst;
int r;
/* algname: essiv, base.cra_name: cbc(aes) */
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
algname, alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME) {
err = -ENAMETOOLONG;
goto err_drop_spawn;
}
if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)
return 0;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s(%s)", algname, alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME) {
err = -ENAMETOOLONG;
goto err_drop_spawn;
}
/* Apply whitening on ciphertext */
sg = crypt_get_sg_data(cc, dmreq->sg_out);
dst = kmap_atomic(sg_page(sg));
r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
kunmap_atomic(dst);
inst->alg.base.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
return r;
}
inst->alg.setkey = geniv_aead_setkey;
inst->alg.encrypt = geniv_aead_encrypt;
inst->alg.decrypt = geniv_aead_decrypt;
static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
/* Used only for writes, there must be an additional space to store IV */
get_random_bytes(iv, cc->iv_size);
return 0;
}
inst->alg.base.cra_ctxsize = sizeof(struct geniv_ctx);
static const struct crypt_iv_operations crypt_iv_plain_ops = {
.generator = crypt_iv_plain_gen
};
inst->alg.init = geniv_aead_init_tfm;
inst->alg.exit = geniv_aead_exit_tfm;
static const struct crypt_iv_operations crypt_iv_plain64_ops = {
.generator = crypt_iv_plain64_gen
};
inst->free = geniv_aead_free;
static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
.generator = crypt_iv_plain64be_gen
};
err = aead_register_instance(tmpl, inst);
if (err)
goto err_drop_spawn;
static const struct crypt_iv_operations crypt_iv_essiv_ops = {
.ctr = crypt_iv_essiv_ctr,
.dtr = crypt_iv_essiv_dtr,
.init = crypt_iv_essiv_init,
.wipe = crypt_iv_essiv_wipe,
.generator = crypt_iv_essiv_gen
};
return 0;
static const struct crypt_iv_operations crypt_iv_benbi_ops = {
.ctr = crypt_iv_benbi_ctr,
.dtr = crypt_iv_benbi_dtr,
.generator = crypt_iv_benbi_gen
};
err_drop_spawn:
crypto_drop_aead(spawn);
err_free_inst:
kfree(inst);
return err;
}
static const struct crypt_iv_operations crypt_iv_null_ops = {
.generator = crypt_iv_null_gen
};
static int geniv_create(struct crypto_template *tmpl,
struct rtattr **tb, char *algname)
{
if (!crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER))
return geniv_skcipher_create(tmpl, tb, algname);
else if (!crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD))
return geniv_aead_create(tmpl, tb, algname);
else
return -EINVAL;
}
static const struct crypt_iv_operations crypt_iv_lmk_ops = {
.ctr = crypt_iv_lmk_ctr,
.dtr = crypt_iv_lmk_dtr,
.init = crypt_iv_lmk_init,
.wipe = crypt_iv_lmk_wipe,
.generator = crypt_iv_lmk_gen,
.post = crypt_iv_lmk_post
};
static int geniv_template_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
return geniv_create(tmpl, tb, tmpl->name);
}
static const struct crypt_iv_operations crypt_iv_tcw_ops = {
.ctr = crypt_iv_tcw_ctr,
.dtr = crypt_iv_tcw_dtr,
.init = crypt_iv_tcw_init,
.wipe = crypt_iv_tcw_wipe,
.generator = crypt_iv_tcw_gen,
.post = crypt_iv_tcw_post
};
#define DECLARE_CRYPTO_TEMPLATE(type) \
{ .name = type, \
.create = geniv_template_create, \
.module = THIS_MODULE, },
static struct crypto_template geniv_tmpl[] = {
DECLARE_CRYPTO_TEMPLATE("plain")
DECLARE_CRYPTO_TEMPLATE("plain64")
DECLARE_CRYPTO_TEMPLATE("essiv")
DECLARE_CRYPTO_TEMPLATE("benbi")
DECLARE_CRYPTO_TEMPLATE("null")
DECLARE_CRYPTO_TEMPLATE("lmk")
DECLARE_CRYPTO_TEMPLATE("tcw")
DECLARE_CRYPTO_TEMPLATE("random")
static struct crypt_iv_operations crypt_iv_random_ops = {
.generator = crypt_iv_random_gen
};
/*
@@ -3435,8 +1925,27 @@ static unsigned crypt_authenckey_size(struct crypt_config *cc)
return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
}
/*
* If AEAD is composed like authenc(hmac(sha256),xts(aes)),
 * the key must, for some reason, be in a special format.
 * This function converts cc->key to this special format.
*/
static void crypt_copy_authenckey(char *p, const void *key,
				  unsigned enckeylen, unsigned authkeylen)
{
struct crypto_authenc_key_param *param;
struct rtattr *rta;
rta = (struct rtattr *)p;
param = RTA_DATA(rta);
param->enckeylen = cpu_to_be32(enckeylen);
rta->rta_len = RTA_LENGTH(sizeof(*param));
rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
p += RTA_SPACE(sizeof(*param));
memcpy(p, key + enckeylen, authkeylen);
p += authkeylen;
memcpy(p, key, enckeylen);
}
static int crypt_setkey(struct crypt_config *cc)
{
/* SPDX-License-Identifier: GPL-2.0 */
/*
* geniv.h: common interface for IV generation algorithms
*
* Copyright (C) 2018, Linaro
*
 * This file defines the data structures the user should pass to the template.
*/
#ifndef _CRYPTO_GENIV_H
#define _CRYPTO_GENIV_H
#include <linux/types.h>
enum setkey_op {
SETKEY_OP_INIT,
SETKEY_OP_SET,
SETKEY_OP_WIPE,
};
struct geniv_key_info {
enum setkey_op keyop;
unsigned int tfms_count;
u8 *key;
char *ivopts;
sector_t iv_offset;
unsigned long cipher_flags;
unsigned short int sector_size;
unsigned int key_size;
unsigned int key_parts;
unsigned int key_mac_size;
unsigned int on_disk_tag_size;
};
struct geniv_req_info {
sector_t cc_sector;
unsigned int nents;
u8 *integrity_metadata;
};
#endif
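/*
 * Hedged usage sketch (illustrative, not part of this header): a caller
 * fills struct geniv_key_info and passes it through the ordinary setkey
 * call, and the template interprets this buffer instead of raw key bytes.
 * Assumes <crypto/skcipher.h> is available; the field values are examples.
 */
static inline int example_geniv_setkey(struct crypto_skcipher *tfm,
				       u8 *key, unsigned int key_size)
{
	struct geniv_key_info info = {
		.keyop		  = SETKEY_OP_INIT,
		.tfms_count	  = 1,
		.key		  = key,
		.ivopts		  = NULL,
		.iv_offset	  = 0,
		.cipher_flags	  = 0,
		.sector_size	  = 512,
		.key_size	  = key_size,
		.key_parts	  = 1,
		.key_mac_size	  = 0,
		.on_disk_tag_size = 0,
	};

	return crypto_skcipher_setkey(tfm, (const u8 *)&info, sizeof(info));
}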