Commit 6fcb81b5 authored by Eric Biggers, committed by Herbert Xu

crypto: x86/camellia-aesni-avx - remove LRW algorithm

The LRW template now wraps an ECB mode algorithm rather than the block
cipher directly.  Therefore it is now redundant for crypto modules to
wrap their ECB code with generic LRW code themselves via lrw_crypt().

Remove the lrw-camellia-aesni algorithm which did this.  Users who
request lrw(camellia) and previously would have gotten
lrw-camellia-aesni will now get lrw(ecb-camellia-aesni) instead, which
is just as fast.
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Parent 09c0f03b
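For context (not part of this patch), the hedged sketch below shows what the change means to a caller of the kernel crypto API: it allocates an "lrw(camellia)" skcipher and prints the driver name the crypto layer resolves it to. The module and function names are hypothetical; only the crypto API calls are real. On an AES-NI/AVX machine the resolved driver would previously have been lrw-camellia-aesni and, after this change, is expected to be the template instantiation lrw(ecb-camellia-aesni).

/*
 * Hypothetical probe module (illustration only, not part of this commit):
 * ask the crypto API for "lrw(camellia)" and report which implementation
 * actually backs it.
 */
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/skcipher.h>

static int __init lrw_camellia_probe_init(void)
{
	struct crypto_skcipher *tfm;

	tfm = crypto_alloc_skcipher("lrw(camellia)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Expected to report "lrw(ecb-camellia-aesni)" after this change. */
	pr_info("lrw(camellia) resolved to: %s\n",
		crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)));

	crypto_free_skcipher(tfm);
	return 0;
}

static void __exit lrw_camellia_probe_exit(void)
{
}

module_init(lrw_camellia_probe_init);
module_exit(lrw_camellia_probe_exit);
MODULE_LICENSE("GPL");

Since the LRW template simply layers tweak handling over the existing ecb-camellia-aesni bulk implementation, the resolved construction should perform comparably to the removed custom glue.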
@@ -17,7 +17,6 @@
#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/fpu/api.h>
#include <asm/crypto/camellia.h>
@@ -186,18 +185,6 @@ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
	return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes);
}

static inline bool camellia_fpu_begin(bool fpu_enabled, unsigned int nbytes)
{
	return glue_fpu_begin(CAMELLIA_BLOCK_SIZE,
			      CAMELLIA_AESNI_PARALLEL_BLOCKS, NULL, fpu_enabled,
			      nbytes);
}

static inline void camellia_fpu_end(bool fpu_enabled)
{
	glue_fpu_end(fpu_enabled);
}

static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
@@ -205,111 +192,6 @@ static int camellia_setkey(struct crypto_tfm *tfm, const u8 *in_key,
				 &tfm->crt_flags);
}

struct crypt_priv {
	struct camellia_ctx *ctx;
	bool fpu_enabled;
};

static void encrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
		camellia_ecb_enc_16way(ctx->ctx, srcdst, srcdst);
		srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
		nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
	}

	while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
		camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst);
		srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
		nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		camellia_enc_blk(ctx->ctx, srcdst, srcdst);
}

static void decrypt_callback(void *priv, u8 *srcdst, unsigned int nbytes)
{
	const unsigned int bsize = CAMELLIA_BLOCK_SIZE;
	struct crypt_priv *ctx = priv;
	int i;

	ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes);

	if (nbytes >= CAMELLIA_AESNI_PARALLEL_BLOCKS * bsize) {
		camellia_ecb_dec_16way(ctx->ctx, srcdst, srcdst);
		srcdst += bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
		nbytes -= bsize * CAMELLIA_AESNI_PARALLEL_BLOCKS;
	}

	while (nbytes >= CAMELLIA_PARALLEL_BLOCKS * bsize) {
		camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst);
		srcdst += bsize * CAMELLIA_PARALLEL_BLOCKS;
		nbytes -= bsize * CAMELLIA_PARALLEL_BLOCKS;
	}

	for (i = 0; i < nbytes / bsize; i++, srcdst += bsize)
		camellia_dec_blk(ctx->ctx, srcdst, srcdst);
}
static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[CAMELLIA_AESNI_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->camellia_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = encrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	camellia_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[CAMELLIA_AESNI_PARALLEL_BLOCKS];
	struct crypt_priv crypt_ctx = {
		.ctx = &ctx->camellia_ctx,
		.fpu_enabled = false,
	};
	struct lrw_crypt_req req = {
		.tbuf = buf,
		.tbuflen = sizeof(buf),
		.table_ctx = &ctx->lrw_table,
		.crypt_ctx = &crypt_ctx,
		.crypt_fn = decrypt_callback,
	};
	int ret;

	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	ret = lrw_crypt(desc, dst, src, nbytes, &req);
	camellia_fpu_end(crypt_ctx.fpu_enabled);

	return ret;
}

static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
@@ -330,7 +212,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
				     &ctx->tweak_ctx, &ctx->crypt_ctx);
}

static struct crypto_alg cmll_algs[10] = { {
static struct crypto_alg cmll_algs[] = { {
	.cra_name		= "__ecb-camellia-aesni",
	.cra_driver_name	= "__driver-ecb-camellia-aesni",
	.cra_priority		= 0,
@@ -391,30 +273,6 @@ static struct crypto_alg cmll_algs[10] = { {
			.decrypt	= ctr_crypt,
		},
	},
}, {
	.cra_name		= "__lrw-camellia-aesni",
	.cra_driver_name	= "__driver-lrw-camellia-aesni",
	.cra_priority		= 0,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_INTERNAL,
	.cra_blocksize		= CAMELLIA_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct camellia_lrw_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_exit		= lrw_camellia_exit_tfm,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= CAMELLIA_MIN_KEY_SIZE +
					  CAMELLIA_BLOCK_SIZE,
			.max_keysize	= CAMELLIA_MAX_KEY_SIZE +
					  CAMELLIA_BLOCK_SIZE,
			.ivsize		= CAMELLIA_BLOCK_SIZE,
			.setkey		= lrw_camellia_setkey,
			.encrypt	= lrw_encrypt,
			.decrypt	= lrw_decrypt,
		},
	},
}, {
	.cra_name		= "__xts-camellia-aesni",
	.cra_driver_name	= "__driver-xts-camellia-aesni",
@@ -502,30 +360,6 @@ static struct crypto_alg cmll_algs[10] = { {
			.geniv		= "chainiv",
		},
	},
}, {
	.cra_name		= "lrw(camellia)",
	.cra_driver_name	= "lrw-camellia-aesni",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CAMELLIA_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct async_helper_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= ablk_init,
	.cra_exit		= ablk_exit,
	.cra_u = {
		.ablkcipher = {
			.min_keysize	= CAMELLIA_MIN_KEY_SIZE +
					  CAMELLIA_BLOCK_SIZE,
			.max_keysize	= CAMELLIA_MAX_KEY_SIZE +
					  CAMELLIA_BLOCK_SIZE,
			.ivsize		= CAMELLIA_BLOCK_SIZE,
			.setkey		= ablk_set_key,
			.encrypt	= ablk_encrypt,
			.decrypt	= ablk_decrypt,
		},
	},
}, {
	.cra_name		= "xts(camellia)",
	.cra_driver_name	= "xts-camellia-aesni",
......
@@ -2,6 +2,7 @@
#ifndef ASM_X86_CAMELLIA_H
#define ASM_X86_CAMELLIA_H
#include <crypto/lrw.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
......
@@ -1169,7 +1169,6 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
	select CRYPTO_ABLK_HELPER
	select CRYPTO_GLUE_HELPER_X86
	select CRYPTO_CAMELLIA_X86_64
	select CRYPTO_LRW
	select CRYPTO_XTS
	help
	  Camellia cipher algorithm module (x86_64/AES-NI/AVX).
......