提交 b2eadbf4 编写于 作者: A Ard Biesheuvel 提交者: Herbert Xu

crypto: arm64/sha2-ce - simplify NEON yield

Instead of calling into kernel_neon_end() and kernel_neon_begin() (and
potentially into schedule()) from the assembler code when running in
task mode and a reschedule is pending, perform only the preempt count
check in assembler, but simply return early in this case, and let the C
code deal with the consequences.

This reverts commit d82f37ab.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
上级 5a69e1b7
...@@ -76,36 +76,30 @@ ...@@ -76,36 +76,30 @@
*/ */
.text .text
SYM_FUNC_START(sha2_ce_transform) SYM_FUNC_START(sha2_ce_transform)
frame_push 3
mov x19, x0
mov x20, x1
mov x21, x2
/* load round constants */ /* load round constants */
0: adr_l x8, .Lsha2_rcon adr_l x8, .Lsha2_rcon
ld1 { v0.4s- v3.4s}, [x8], #64 ld1 { v0.4s- v3.4s}, [x8], #64
ld1 { v4.4s- v7.4s}, [x8], #64 ld1 { v4.4s- v7.4s}, [x8], #64
ld1 { v8.4s-v11.4s}, [x8], #64 ld1 { v8.4s-v11.4s}, [x8], #64
ld1 {v12.4s-v15.4s}, [x8] ld1 {v12.4s-v15.4s}, [x8]
/* load state */ /* load state */
ld1 {dgav.4s, dgbv.4s}, [x19] ld1 {dgav.4s, dgbv.4s}, [x0]
/* load sha256_ce_state::finalize */ /* load sha256_ce_state::finalize */
ldr_l w4, sha256_ce_offsetof_finalize, x4 ldr_l w4, sha256_ce_offsetof_finalize, x4
ldr w4, [x19, x4] ldr w4, [x0, x4]
/* load input */ /* load input */
1: ld1 {v16.4s-v19.4s}, [x20], #64 0: ld1 {v16.4s-v19.4s}, [x1], #64
sub w21, w21, #1 sub w2, w2, #1
CPU_LE( rev32 v16.16b, v16.16b ) CPU_LE( rev32 v16.16b, v16.16b )
CPU_LE( rev32 v17.16b, v17.16b ) CPU_LE( rev32 v17.16b, v17.16b )
CPU_LE( rev32 v18.16b, v18.16b ) CPU_LE( rev32 v18.16b, v18.16b )
CPU_LE( rev32 v19.16b, v19.16b ) CPU_LE( rev32 v19.16b, v19.16b )
2: add t0.4s, v16.4s, v0.4s 1: add t0.4s, v16.4s, v0.4s
mov dg0v.16b, dgav.16b mov dg0v.16b, dgav.16b
mov dg1v.16b, dgbv.16b mov dg1v.16b, dgbv.16b
...@@ -134,24 +128,18 @@ CPU_LE( rev32 v19.16b, v19.16b ) ...@@ -134,24 +128,18 @@ CPU_LE( rev32 v19.16b, v19.16b )
add dgbv.4s, dgbv.4s, dg1v.4s add dgbv.4s, dgbv.4s, dg1v.4s
/* handled all input blocks? */ /* handled all input blocks? */
cbz w21, 3f cbz w2, 2f
cond_yield 3f, x5
if_will_cond_yield_neon
st1 {dgav.4s, dgbv.4s}, [x19]
do_cond_yield_neon
b 0b b 0b
endif_yield_neon
b 1b
/* /*
* Final block: add padding and total bit count. * Final block: add padding and total bit count.
* Skip if the input size was not a round multiple of the block size, * Skip if the input size was not a round multiple of the block size,
* the padding is handled by the C code in that case. * the padding is handled by the C code in that case.
*/ */
3: cbz x4, 4f 2: cbz x4, 3f
ldr_l w4, sha256_ce_offsetof_count, x4 ldr_l w4, sha256_ce_offsetof_count, x4
ldr x4, [x19, x4] ldr x4, [x0, x4]
movi v17.2d, #0 movi v17.2d, #0
mov x8, #0x80000000 mov x8, #0x80000000
movi v18.2d, #0 movi v18.2d, #0
...@@ -160,10 +148,10 @@ CPU_LE( rev32 v19.16b, v19.16b ) ...@@ -160,10 +148,10 @@ CPU_LE( rev32 v19.16b, v19.16b )
mov x4, #0 mov x4, #0
mov v19.d[0], xzr mov v19.d[0], xzr
mov v19.d[1], x7 mov v19.d[1], x7
b 2b b 1b
/* store new state */ /* store new state */
4: st1 {dgav.4s, dgbv.4s}, [x19] 3: st1 {dgav.4s, dgbv.4s}, [x0]
frame_pop mov w0, w2
ret ret
SYM_FUNC_END(sha2_ce_transform) SYM_FUNC_END(sha2_ce_transform)
...@@ -30,14 +30,22 @@ struct sha256_ce_state { ...@@ -30,14 +30,22 @@ struct sha256_ce_state {
extern const u32 sha256_ce_offsetof_count; extern const u32 sha256_ce_offsetof_count;
extern const u32 sha256_ce_offsetof_finalize; extern const u32 sha256_ce_offsetof_finalize;
asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src, asmlinkage int sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
int blocks); int blocks);
static void __sha2_ce_transform(struct sha256_state *sst, u8 const *src, static void __sha2_ce_transform(struct sha256_state *sst, u8 const *src,
int blocks) int blocks)
{ {
sha2_ce_transform(container_of(sst, struct sha256_ce_state, sst), src, while (blocks) {
blocks); int rem;
kernel_neon_begin();
rem = sha2_ce_transform(container_of(sst, struct sha256_ce_state,
sst), src, blocks);
kernel_neon_end();
src += (blocks - rem) * SHA256_BLOCK_SIZE;
blocks = rem;
}
} }
const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state, const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
...@@ -63,9 +71,7 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data, ...@@ -63,9 +71,7 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
__sha256_block_data_order); __sha256_block_data_order);
sctx->finalize = 0; sctx->finalize = 0;
kernel_neon_begin();
sha256_base_do_update(desc, data, len, __sha2_ce_transform); sha256_base_do_update(desc, data, len, __sha2_ce_transform);
kernel_neon_end();
return 0; return 0;
} }
...@@ -90,11 +96,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data, ...@@ -90,11 +96,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
*/ */
sctx->finalize = finalize; sctx->finalize = finalize;
kernel_neon_begin();
sha256_base_do_update(desc, data, len, __sha2_ce_transform); sha256_base_do_update(desc, data, len, __sha2_ce_transform);
if (!finalize) if (!finalize)
sha256_base_do_finalize(desc, __sha2_ce_transform); sha256_base_do_finalize(desc, __sha2_ce_transform);
kernel_neon_end();
return sha256_base_finish(desc, out); return sha256_base_finish(desc, out);
} }
...@@ -108,9 +112,7 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out) ...@@ -108,9 +112,7 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
} }
sctx->finalize = 0; sctx->finalize = 0;
kernel_neon_begin();
sha256_base_do_finalize(desc, __sha2_ce_transform); sha256_base_do_finalize(desc, __sha2_ce_transform);
kernel_neon_end();
return sha256_base_finish(desc, out); return sha256_base_finish(desc, out);
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册