Commit c3b734dd authored by Martin Willi, committed by Herbert Xu

crypto: x86/chacha20 - Support partial lengths in 8-block AVX2 variant

Add a length argument to the eight-block function for AVX2, so the
block function may XOR only a partial length of the full eight blocks.

To avoid unnecessary operations, we integrate the XORing of the first
four blocks into the final lane interleaving; this also saves some work
in the partial-length path.

Signed-off-by: Martin Willi <martin@strongswan.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Parent db8e15a2
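For context, the new interface is consumed from C roughly as sketched below. The declaration and the 8-block loop mirror the glue-code hunks at the end of this diff; the tail call after the loop is illustrative only, showing what the length argument makes possible once callers hand partial lengths down, and the state[12] update assumes the usual ChaCha20 layout with the block counter in word 12 (the helper name chacha20_avx2_sketch is hypothetical).

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/types.h>

#define CHACHA20_BLOCK_SIZE	64

/* After this patch, len may be anything up to 8 * CHACHA20_BLOCK_SIZE. */
asmlinkage void chacha20_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
					 unsigned int len);

/* Illustrative caller: full 512-byte chunks first, then the remainder. */
static void chacha20_avx2_sketch(u32 *state, u8 *dst, const u8 *src,
				 unsigned int bytes)
{
	while (bytes >= CHACHA20_BLOCK_SIZE * 8) {
		chacha20_8block_xor_avx2(state, dst, src, bytes);
		bytes -= CHACHA20_BLOCK_SIZE * 8;
		src += CHACHA20_BLOCK_SIZE * 8;
		dst += CHACHA20_BLOCK_SIZE * 8;
		state[12] += 8;		/* one counter tick per 64-byte block */
	}
	if (bytes) {
		/* Partial length: the same routine now XORs just 'bytes' bytes. */
		chacha20_8block_xor_avx2(state, dst, src, bytes);
		state[12] += DIV_ROUND_UP(bytes, CHACHA20_BLOCK_SIZE);
	}
}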
@@ -30,8 +30,9 @@ CTRINC:	.octa 0x00000003000000020000000100000000
 
 ENTRY(chacha20_8block_xor_avx2)
 	# %rdi: Input state matrix, s
-	# %rsi: 8 data blocks output, o
-	# %rdx: 8 data blocks input, i
+	# %rsi: up to 8 data blocks output, o
+	# %rdx: up to 8 data blocks input, i
+	# %rcx: input/output length in bytes
 
 	# This function encrypts eight consecutive ChaCha20 blocks by loading
 	# the state matrix in AVX registers eight times. As we need some
@@ -48,6 +49,7 @@ ENTRY(chacha20_8block_xor_avx2)
 	lea		8(%rsp),%r10
 	and		$~31, %rsp
 	sub		$0x80, %rsp
+	mov		%rcx,%rax
 
 	# x0..15[0-7] = s[0..15]
 	vpbroadcastd	0x00(%rdi),%ymm0
@@ -375,74 +377,143 @@ ENTRY(chacha20_8block_xor_avx2)
 	vpunpckhqdq	%ymm15,%ymm0,%ymm15
 
 	# interleave 128-bit words in state n, n+4
-	vmovdqa		0x00(%rsp),%ymm0
-	vperm2i128	$0x20,%ymm4,%ymm0,%ymm1
-	vperm2i128	$0x31,%ymm4,%ymm0,%ymm4
-	vmovdqa		%ymm1,0x00(%rsp)
-	vmovdqa		0x20(%rsp),%ymm0
-	vperm2i128	$0x20,%ymm5,%ymm0,%ymm1
-	vperm2i128	$0x31,%ymm5,%ymm0,%ymm5
-	vmovdqa		%ymm1,0x20(%rsp)
-	vmovdqa		0x40(%rsp),%ymm0
-	vperm2i128	$0x20,%ymm6,%ymm0,%ymm1
-	vperm2i128	$0x31,%ymm6,%ymm0,%ymm6
-	vmovdqa		%ymm1,0x40(%rsp)
-	vmovdqa		0x60(%rsp),%ymm0
-	vperm2i128	$0x20,%ymm7,%ymm0,%ymm1
-	vperm2i128	$0x31,%ymm7,%ymm0,%ymm7
-	vmovdqa		%ymm1,0x60(%rsp)
-	vperm2i128	$0x20,%ymm12,%ymm8,%ymm0
-	vperm2i128	$0x31,%ymm12,%ymm8,%ymm12
-	vmovdqa		%ymm0,%ymm8
-	vperm2i128	$0x20,%ymm13,%ymm9,%ymm0
-	vperm2i128	$0x31,%ymm13,%ymm9,%ymm13
-	vmovdqa		%ymm0,%ymm9
-	vperm2i128	$0x20,%ymm14,%ymm10,%ymm0
-	vperm2i128	$0x31,%ymm14,%ymm10,%ymm14
-	vmovdqa		%ymm0,%ymm10
-	vperm2i128	$0x20,%ymm15,%ymm11,%ymm0
-	vperm2i128	$0x31,%ymm15,%ymm11,%ymm15
-	vmovdqa		%ymm0,%ymm11
-
-	# xor with corresponding input, write to output
-	vmovdqa		0x00(%rsp),%ymm0
-	vpxor		0x0000(%rdx),%ymm0,%ymm0
-	vmovdqu		%ymm0,0x0000(%rsi)
-	vmovdqa		0x20(%rsp),%ymm0
-	vpxor		0x0080(%rdx),%ymm0,%ymm0
-	vmovdqu		%ymm0,0x0080(%rsi)
-	vmovdqa		0x40(%rsp),%ymm0
-	vpxor		0x0040(%rdx),%ymm0,%ymm0
-	vmovdqu		%ymm0,0x0040(%rsi)
-	vmovdqa		0x60(%rsp),%ymm0
-	vpxor		0x00c0(%rdx),%ymm0,%ymm0
-	vmovdqu		%ymm0,0x00c0(%rsi)
-	vpxor		0x0100(%rdx),%ymm4,%ymm4
-	vmovdqu		%ymm4,0x0100(%rsi)
-	vpxor		0x0180(%rdx),%ymm5,%ymm5
-	vmovdqu		%ymm5,0x00180(%rsi)
-	vpxor		0x0140(%rdx),%ymm6,%ymm6
-	vmovdqu		%ymm6,0x0140(%rsi)
-	vpxor		0x01c0(%rdx),%ymm7,%ymm7
-	vmovdqu		%ymm7,0x01c0(%rsi)
-	vpxor		0x0020(%rdx),%ymm8,%ymm8
-	vmovdqu		%ymm8,0x0020(%rsi)
-	vpxor		0x00a0(%rdx),%ymm9,%ymm9
-	vmovdqu		%ymm9,0x00a0(%rsi)
-	vpxor		0x0060(%rdx),%ymm10,%ymm10
-	vmovdqu		%ymm10,0x0060(%rsi)
-	vpxor		0x00e0(%rdx),%ymm11,%ymm11
-	vmovdqu		%ymm11,0x00e0(%rsi)
-	vpxor		0x0120(%rdx),%ymm12,%ymm12
-	vmovdqu		%ymm12,0x0120(%rsi)
-	vpxor		0x01a0(%rdx),%ymm13,%ymm13
-	vmovdqu		%ymm13,0x01a0(%rsi)
-	vpxor		0x0160(%rdx),%ymm14,%ymm14
-	vmovdqu		%ymm14,0x0160(%rsi)
-	vpxor		0x01e0(%rdx),%ymm15,%ymm15
-	vmovdqu		%ymm15,0x01e0(%rsi)
+	# xor/write first four blocks
+	vmovdqa		0x00(%rsp),%ymm1
+	vperm2i128	$0x20,%ymm4,%ymm1,%ymm0
+	cmp		$0x0020,%rax
+	jl		.Lxorpart8
+	vpxor		0x0000(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x0000(%rsi)
+	vperm2i128	$0x31,%ymm4,%ymm1,%ymm4
+
+	vperm2i128	$0x20,%ymm12,%ymm8,%ymm0
+	cmp		$0x0040,%rax
+	jl		.Lxorpart8
+	vpxor		0x0020(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x0020(%rsi)
+	vperm2i128	$0x31,%ymm12,%ymm8,%ymm12
+
+	vmovdqa		0x40(%rsp),%ymm1
+	vperm2i128	$0x20,%ymm6,%ymm1,%ymm0
+	cmp		$0x0060,%rax
+	jl		.Lxorpart8
+	vpxor		0x0040(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x0040(%rsi)
+	vperm2i128	$0x31,%ymm6,%ymm1,%ymm6
+
+	vperm2i128	$0x20,%ymm14,%ymm10,%ymm0
+	cmp		$0x0080,%rax
+	jl		.Lxorpart8
+	vpxor		0x0060(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x0060(%rsi)
+	vperm2i128	$0x31,%ymm14,%ymm10,%ymm14
+
+	vmovdqa		0x20(%rsp),%ymm1
+	vperm2i128	$0x20,%ymm5,%ymm1,%ymm0
+	cmp		$0x00a0,%rax
+	jl		.Lxorpart8
+	vpxor		0x0080(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x0080(%rsi)
+	vperm2i128	$0x31,%ymm5,%ymm1,%ymm5
+
+	vperm2i128	$0x20,%ymm13,%ymm9,%ymm0
+	cmp		$0x00c0,%rax
+	jl		.Lxorpart8
+	vpxor		0x00a0(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x00a0(%rsi)
+	vperm2i128	$0x31,%ymm13,%ymm9,%ymm13
+
+	vmovdqa		0x60(%rsp),%ymm1
+	vperm2i128	$0x20,%ymm7,%ymm1,%ymm0
+	cmp		$0x00e0,%rax
+	jl		.Lxorpart8
+	vpxor		0x00c0(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x00c0(%rsi)
+	vperm2i128	$0x31,%ymm7,%ymm1,%ymm7
+
+	vperm2i128	$0x20,%ymm15,%ymm11,%ymm0
+	cmp		$0x0100,%rax
+	jl		.Lxorpart8
+	vpxor		0x00e0(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x00e0(%rsi)
+	vperm2i128	$0x31,%ymm15,%ymm11,%ymm15
+
+	# xor remaining blocks, write to output
+	vmovdqa		%ymm4,%ymm0
+	cmp		$0x0120,%rax
+	jl		.Lxorpart8
+	vpxor		0x0100(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x0100(%rsi)
+
+	vmovdqa		%ymm12,%ymm0
+	cmp		$0x0140,%rax
+	jl		.Lxorpart8
+	vpxor		0x0120(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x0120(%rsi)
+
+	vmovdqa		%ymm6,%ymm0
+	cmp		$0x0160,%rax
+	jl		.Lxorpart8
+	vpxor		0x0140(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x0140(%rsi)
+
+	vmovdqa		%ymm14,%ymm0
+	cmp		$0x0180,%rax
+	jl		.Lxorpart8
+	vpxor		0x0160(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x0160(%rsi)
+
+	vmovdqa		%ymm5,%ymm0
+	cmp		$0x01a0,%rax
+	jl		.Lxorpart8
+	vpxor		0x0180(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x0180(%rsi)
+
+	vmovdqa		%ymm13,%ymm0
+	cmp		$0x01c0,%rax
+	jl		.Lxorpart8
+	vpxor		0x01a0(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x01a0(%rsi)
+
+	vmovdqa		%ymm7,%ymm0
+	cmp		$0x01e0,%rax
+	jl		.Lxorpart8
+	vpxor		0x01c0(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x01c0(%rsi)
+
+	vmovdqa		%ymm15,%ymm0
+	cmp		$0x0200,%rax
+	jl		.Lxorpart8
+	vpxor		0x01e0(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x01e0(%rsi)
 
+.Ldone8:
 	vzeroupper
 	lea		-8(%r10),%rsp
 	ret
+
+.Lxorpart8:
+	# xor remaining bytes from partial register into output
+	mov		%rax,%r9
+	and		$0x1f,%r9
+	jz		.Ldone8
+	and		$~0x1f,%rax
+
+	mov		%rsi,%r11
+
+	lea		(%rdx,%rax),%rsi
+	mov		%rsp,%rdi
+	mov		%r9,%rcx
+	rep movsb
+
+	vpxor		0x00(%rsp),%ymm0,%ymm0
+	vmovdqa		%ymm0,0x00(%rsp)
+
+	mov		%rsp,%rsi
+	lea		(%r11,%rax),%rdi
+	mov		%r9,%rcx
+	rep movsb
+
+	jmp		.Ldone8
+
 ENDPROC(chacha20_8block_xor_avx2)
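The new .Lxorpart8 path is what makes partial lengths work: it rounds the processed length down to a whole 32-byte register, bounces the odd remainder through the 32-byte-aligned stack area with rep movsb, XORs it against the keystream register that was only partially consumed, and copies just the valid bytes back out. A plain-C restatement of that strategy, not part of the patch and with hypothetical names, would look like:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* C model of .Lxorpart8: 'keystream' stands for the 32-byte register
 * (%ymm0) that could not be XORed in full, 'done' is the length rounded
 * down to a 32-byte boundary, 'len' the total requested length. */
static void xor_partial_tail(uint8_t *dst, const uint8_t *src,
			     const uint8_t keystream[32],
			     size_t done, size_t len)
{
	uint8_t buf[32] = { 0 };	/* plays the role of the aligned stack area */
	size_t rem = len - done;	/* 0 < rem < 32 */
	size_t i;

	memcpy(buf, src + done, rem);	/* first rep movsb: input tail -> scratch */
	for (i = 0; i < 32; i++)	/* vpxor/vmovdqa against the keystream */
		buf[i] ^= keystream[i];
	memcpy(dst + done, buf, rem);	/* second rep movsb: scratch -> output */
}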
@@ -24,7 +24,8 @@ asmlinkage void chacha20_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
 asmlinkage void chacha20_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
					  unsigned int len);
 #ifdef CONFIG_AS_AVX2
-asmlinkage void chacha20_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src);
+asmlinkage void chacha20_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
+					 unsigned int len);
 static bool chacha20_use_avx2;
 #endif
 
@@ -34,7 +35,7 @@ static void chacha20_dosimd(u32 *state, u8 *dst, const u8 *src,
 #ifdef CONFIG_AS_AVX2
 	if (chacha20_use_avx2) {
 		while (bytes >= CHACHA20_BLOCK_SIZE * 8) {
-			chacha20_8block_xor_avx2(state, dst, src);
+			chacha20_8block_xor_avx2(state, dst, src, bytes);
 			bytes -= CHACHA20_BLOCK_SIZE * 8;
 			src += CHACHA20_BLOCK_SIZE * 8;
 			dst += CHACHA20_BLOCK_SIZE * 8;