Commit fcb0e30d, authored by Ard Biesheuvel, committed by Herbert Xu

crypto: arm/aes - fix round key prototypes

The AES round keys are arrays of u32s in native endianness now, so
update the function prototypes accordingly.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Parent 0ba3c026
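Before the diff itself, here is a minimal, self-contained C sketch (not part of the commit) of what the prototype change amounts to at the call sites: once the round-key parameter is declared as u32 const rk[] rather than u8 const rk[], the (u8 *) casts in the glue code become unnecessary. The struct and function names below are made up for illustration only; they merely mirror the shape of the kernel's crypto_aes_ctx and the ce_aes_* helpers changed in this commit.

/* Illustrative sketch only -- not taken from the commit. */
#include <stdint.h>

typedef uint8_t u8;
typedef uint32_t u32;

struct aes_ctx_example {
	u32 key_enc[60];	/* expanded round keys, stored as native-endian u32s */
};

/* Old-style prototype: round keys declared as u8[], so callers must cast. */
static void ecb_encrypt_old(u8 out[], u8 const in[], u8 const rk[],
			    int rounds, int blocks)
{
	(void)out; (void)in; (void)rk; (void)rounds; (void)blocks; /* no-op stub */
}

/* New-style prototype: round keys declared as u32[], matching the storage type. */
static void ecb_encrypt_new(u8 out[], u8 const in[], u32 const rk[],
			    int rounds, int blocks)
{
	(void)out; (void)in; (void)rk; (void)rounds; (void)blocks; /* no-op stub */
}

int main(void)
{
	struct aes_ctx_example ctx = { { 0 } };
	u8 buf[16] = { 0 };

	/* Before the change: the (u8 *) cast obscures the key schedule's real type. */
	ecb_encrypt_old(buf, buf, (u8 *)ctx.key_enc, 10, 1);

	/* After the change: the call site needs no cast. */
	ecb_encrypt_new(buf, buf, ctx.key_enc, 10, 1);

	return 0;
}

The diff below applies exactly this kind of change to the assembly prototypes and their C glue callers.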
@@ -154,9 +154,9 @@ ENDPROC(aes_decrypt_3x)
 	.endm

 	/*
-	 * aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+	 * aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
 	 *		   int blocks)
-	 * aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+	 * aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
 	 *		   int blocks)
 	 */
 ENTRY(ce_aes_ecb_encrypt)
@@ -212,9 +212,9 @@ ENTRY(ce_aes_ecb_decrypt)
 ENDPROC(ce_aes_ecb_decrypt)

 	/*
-	 * aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+	 * aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
 	 *		   int blocks, u8 iv[])
-	 * aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+	 * aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
 	 *		   int blocks, u8 iv[])
 	 */
 ENTRY(ce_aes_cbc_encrypt)
@@ -272,7 +272,7 @@ ENTRY(ce_aes_cbc_decrypt)
 ENDPROC(ce_aes_cbc_decrypt)

 	/*
-	 * aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
+	 * aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[], int rounds,
 	 *		   int blocks, u8 ctr[])
 	 */
 ENTRY(ce_aes_ctr_encrypt)
@@ -349,10 +349,10 @@ ENTRY(ce_aes_ctr_encrypt)
 ENDPROC(ce_aes_ctr_encrypt)

 	/*
-	 * aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
-	 *		   int blocks, u8 iv[], u8 const rk2[], int first)
-	 * aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[], int rounds,
-	 *		   int blocks, u8 iv[], u8 const rk2[], int first)
+	 * aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[], int rounds,
+	 *		   int blocks, u8 iv[], u32 const rk2[], int first)
+	 * aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[], int rounds,
+	 *		   int blocks, u8 iv[], u32 const rk2[], int first)
 	 */

 	.macro		next_tweak, out, in, const, tmp
...
@@ -25,25 +25,25 @@ MODULE_LICENSE("GPL v2");
 asmlinkage u32 ce_aes_sub(u32 input);
 asmlinkage void ce_aes_invert(void *dst, void *src);

-asmlinkage void ce_aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void ce_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
 				   int rounds, int blocks);
-asmlinkage void ce_aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void ce_aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
 				   int rounds, int blocks);

-asmlinkage void ce_aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void ce_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
 				   int rounds, int blocks, u8 iv[]);
-asmlinkage void ce_aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void ce_aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
 				   int rounds, int blocks, u8 iv[]);

-asmlinkage void ce_aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void ce_aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
 				   int rounds, int blocks, u8 ctr[]);

-asmlinkage void ce_aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
+asmlinkage void ce_aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
 				   int rounds, int blocks, u8 iv[],
-				   u8 const rk2[], int first);
+				   u32 const rk2[], int first);
-asmlinkage void ce_aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[],
+asmlinkage void ce_aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
 				   int rounds, int blocks, u8 iv[],
-				   u8 const rk2[], int first);
+				   u32 const rk2[], int first);

 struct aes_block {
 	u8 b[AES_BLOCK_SIZE];
@@ -182,7 +182,7 @@ static int ecb_encrypt(struct skcipher_request *req)
 	kernel_neon_begin();
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				   (u8 *)ctx->key_enc, num_rounds(ctx), blocks);
+				   ctx->key_enc, num_rounds(ctx), blocks);
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
@@ -202,7 +202,7 @@ static int ecb_decrypt(struct skcipher_request *req)
 	kernel_neon_begin();
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				   (u8 *)ctx->key_dec, num_rounds(ctx), blocks);
+				   ctx->key_dec, num_rounds(ctx), blocks);
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
@@ -222,7 +222,7 @@ static int cbc_encrypt(struct skcipher_request *req)
 	kernel_neon_begin();
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		ce_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				   (u8 *)ctx->key_enc, num_rounds(ctx), blocks,
+				   ctx->key_enc, num_rounds(ctx), blocks,
 				   walk.iv);
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
@@ -243,7 +243,7 @@ static int cbc_decrypt(struct skcipher_request *req)
 	kernel_neon_begin();
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		ce_aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				   (u8 *)ctx->key_dec, num_rounds(ctx), blocks,
+				   ctx->key_dec, num_rounds(ctx), blocks,
 				   walk.iv);
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
@@ -263,7 +263,7 @@ static int ctr_encrypt(struct skcipher_request *req)
 	kernel_neon_begin();
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		ce_aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				   (u8 *)ctx->key_enc, num_rounds(ctx), blocks,
+				   ctx->key_enc, num_rounds(ctx), blocks,
 				   walk.iv);
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
@@ -278,8 +278,8 @@ static int ctr_encrypt(struct skcipher_request *req)
 		 */
 		blocks = -1;

-		ce_aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc,
-				   num_rounds(ctx), blocks, walk.iv);
+		ce_aes_ctr_encrypt(tail, NULL, ctx->key_enc, num_rounds(ctx),
+				   blocks, walk.iv);
 		crypto_xor_cpy(tdst, tsrc, tail, nbytes);
 		err = skcipher_walk_done(&walk, 0);
 	}
@@ -324,8 +324,8 @@ static int xts_encrypt(struct skcipher_request *req)
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				   (u8 *)ctx->key1.key_enc, rounds, blocks,
-				   walk.iv, (u8 *)ctx->key2.key_enc, first);
+				   ctx->key1.key_enc, rounds, blocks, walk.iv,
+				   ctx->key2.key_enc, first);
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
@@ -346,8 +346,8 @@ static int xts_decrypt(struct skcipher_request *req)
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				   (u8 *)ctx->key1.key_dec, rounds, blocks,
-				   walk.iv, (u8 *)ctx->key2.key_enc, first);
+				   ctx->key1.key_dec, rounds, blocks, walk.iv,
+				   ctx->key2.key_enc, first);
 		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
...