Commit 98e87e3d authored by Christian Lamparter, committed by Herbert Xu

crypto: crypto4xx - add aes-ctr support

This patch adds support for the aes-ctr skcipher.

name         : ctr(aes)
driver       : ctr-aes-ppc4xx
module       : crypto4xx
priority     : 300
refcnt       : 1
selftest     : passed
internal     : no
type         : skcipher
async        : yes
blocksize    : 16
min keysize  : 16
max keysize  : 32
ivsize       : 16
chunksize    : 16
walksize     : 16
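
For reference, the sketch below shows how an in-kernel consumer could drive this transform through the generic skcipher API; allocating "ctr(aes)" resolves to the highest-priority implementation, which is ctr-aes-ppc4xx (priority 300) when the engine is available. This is only a minimal sketch and not part of the patch: the function name ctr_aes_demo and the all-zero key, IV, and data buffer are made up, and error handling is trimmed to the essentials.

/* Hedged sketch, not part of the commit: exercising ctr(aes) from kernel code. */
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int ctr_aes_demo(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 key[16] = { 0 };	/* AES-128 demo key */
	u8 iv[16] = { 0 };	/* full 16-byte CTR IV */
	u8 *buf;
	int err;

	/* Resolves to the highest-priority ctr(aes), e.g. ctr-aes-ppc4xx. */
	tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(64, GFP_KERNEL);	/* data for DMA must not live on the stack */
	if (!buf) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_buf;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_buf;
	}

	sg_init_one(&sg, buf, 64);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 64, iv);

	/* The transform is async, so wait for the completion callback. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_buf:
	kfree(buf);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}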

The hardware uses only the last 32 bits as the counter while the
kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
the whole IV is a counter. To make this work, the driver will
fall back if the counter is going to overflow.
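
To make the wrap condition concrete, here is a minimal standalone sketch of the same 32-bit check; the helper name would_wrap_32bit_counter is hypothetical and only mirrors the driver's logic. With the low IV word at 0xffffffff, even a two-block request wraps the counter and must go to the software fallback, while a low starting value stays on the engine.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16

/*
 * Hypothetical helper mirroring the driver's check: the engine increments
 * only the low 32 bits of the IV, so a request that would wrap that word
 * cannot be offloaded.
 */
static bool would_wrap_32bit_counter(uint32_t counter, size_t cryptlen)
{
	uint32_t nblks = (cryptlen + AES_BLOCK_SIZE - 1) / AES_BLOCK_SIZE;

	return (uint32_t)(counter + nblks) < counter;
}

int main(void)
{
	/* 0xffffffff + 2 blocks wraps the 32-bit word -> use the fallback. */
	printf("%d\n", would_wrap_32bit_counter(0xffffffffu, 2 * AES_BLOCK_SIZE));
	/* 0x00000001 + 2 blocks stays in range -> the engine handles it. */
	printf("%d\n", would_wrap_32bit_counter(0x00000001u, 2 * AES_BLOCK_SIZE));
	return 0;
}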

The aead's crypto4xx_setup_fallback() function is renamed to
crypto4xx_aead_setup_fallback().
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Parent c4e90650
@@ -302,6 +302,7 @@ config CRYPTO_DEV_PPC4XX
	select CRYPTO_AEAD
	select CRYPTO_AES
	select CRYPTO_CCM
	select CRYPTO_CTR
	select CRYPTO_GCM
	select CRYPTO_BLKCIPHER
	help
......
@@ -240,6 +240,85 @@ int crypto4xx_rfc3686_decrypt(struct skcipher_request *req)
				  ctx->sa_out, ctx->sa_len, 0);
}

static int
crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	unsigned int counter = be32_to_cpup((__be32 *)(req->iv + iv_len - 4));
	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
			AES_BLOCK_SIZE;

	/*
	 * The hardware uses only the last 32 bits as the counter while the
	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
	 * the whole IV is a counter. So fall back if the counter is going to
	 * overflow.
	 */
	if (counter + nblks < counter) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);
		int ret;

		skcipher_request_set_tfm(subreq, ctx->sw_cipher.cipher);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->cryptlen, req->iv);
		ret = encrypt ? crypto_skcipher_encrypt(subreq)
			      : crypto_skcipher_decrypt(subreq);

		skcipher_request_zero(subreq);
		return ret;
	}

	return encrypt ? crypto4xx_encrypt_iv(req)
		       : crypto4xx_decrypt_iv(req);
}

static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
				       struct crypto_skcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	int rc;

	crypto_skcipher_clear_flags(ctx->sw_cipher.cipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctx->sw_cipher.cipher,
		crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
	rc = crypto_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
	crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
	crypto_skcipher_set_flags(cipher,
		crypto_skcipher_get_flags(ctx->sw_cipher.cipher) &
		CRYPTO_TFM_RES_MASK);

	return rc;
}

int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
	int rc;

	rc = crypto4xx_sk_setup_fallback(ctx, cipher, key, keylen);
	if (rc)
		return rc;

	return crypto4xx_setkey_aes(cipher, key, keylen,
				    CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB);
}

int crypto4xx_encrypt_ctr(struct skcipher_request *req)
{
	return crypto4xx_ctr_crypt(req, true);
}

int crypto4xx_decrypt_ctr(struct skcipher_request *req)
{
	return crypto4xx_ctr_crypt(req, false);
}

static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
						bool is_ccm, bool decrypt)
{
@@ -282,10 +361,10 @@ static int crypto4xx_aead_fallback(struct aead_request *req,
			    crypto_aead_encrypt(subreq);
}

static int crypto4xx_setup_fallback(struct crypto4xx_ctx *ctx,
				    struct crypto_aead *cipher,
				    const u8 *key,
				    unsigned int keylen)
static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx,
					 struct crypto_aead *cipher,
					 const u8 *key,
					 unsigned int keylen)
{
	int rc;
@@ -313,7 +392,7 @@ int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
	struct dynamic_sa_ctl *sa;
	int rc = 0;

	rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
	rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
	if (rc)
		return rc;
@@ -472,7 +551,7 @@ int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
		return -EINVAL;
	}

	rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
	rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
	if (rc)
		return rc;
......
@@ -941,6 +941,19 @@ static int crypto4xx_sk_init(struct crypto_skcipher *sk)
	struct crypto4xx_alg *amcc_alg;
	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);

	if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->sw_cipher.cipher =
			crypto_alloc_skcipher(alg->base.cra_name, 0,
					      CRYPTO_ALG_NEED_FALLBACK |
					      CRYPTO_ALG_ASYNC);
		if (IS_ERR(ctx->sw_cipher.cipher))
			return PTR_ERR(ctx->sw_cipher.cipher);

		crypto_skcipher_set_reqsize(sk,
			sizeof(struct skcipher_request) + 32 +
			crypto_skcipher_reqsize(ctx->sw_cipher.cipher));
	}

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
	crypto4xx_ctx_init(amcc_alg, ctx);

	return 0;
@@ -956,6 +969,8 @@ static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);

	crypto4xx_common_exit(ctx);
	if (ctx->sw_cipher.cipher)
		crypto_free_skcipher(ctx->sw_cipher.cipher);
}
static int crypto4xx_aead_init(struct crypto_aead *tfm)
@@ -1145,6 +1160,28 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_ctr,
		.encrypt = crypto4xx_encrypt_ctr,
		.decrypt = crypto4xx_decrypt_ctr,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "rfc3686(ctr(aes))",
......
@@ -128,6 +128,7 @@ struct crypto4xx_ctx {
	__le32 iv_nonce;
	u32 sa_len;
	union {
		struct crypto_skcipher *cipher;
		struct crypto_aead *aead;
	} sw_cipher;
};
@@ -163,12 +164,16 @@ int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
			     const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
			     const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
			     const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
			     const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
			     const u8 *key, unsigned int keylen);
int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
			     const u8 *key, unsigned int keylen);
int crypto4xx_encrypt_ctr(struct skcipher_request *req);
int crypto4xx_decrypt_ctr(struct skcipher_request *req);
int crypto4xx_encrypt_iv(struct skcipher_request *req);
int crypto4xx_decrypt_iv(struct skcipher_request *req);
int crypto4xx_encrypt_noiv(struct skcipher_request *req);
......