Commit f4e175e4
Authored on Dec 15, 2015 by Andy Polyakov

C64x+ assembly pack: add ChaCha20 and Poly1305 modules.

Reviewed-by: Richard Levitte <levitte@openssl.org>
Parent: 647097e1
Showing 3 changed files with 1244 additions and 6 deletions (+1244 -6):

Configurations/10-main.conf                +8   -6
crypto/chacha/asm/chacha-c64xplus.pl       +916 -0
crypto/poly1305/asm/poly1305-c64xplus.pl   +320 -0
Configurations/10-main.conf
@@ -716,12 +716,14 @@
 cflags           => "--linux -ea=.s -eo=.o -mv6400+ -o2 -ox -ms -pden -DOPENSSL_SMALL_FOOTPRINT",
 thread_cflag     => "-D_REENTRANT",
 bn_ops           => "BN_LLONG",
-cpuid_obj        => "c64xpluscpuid.o",
-bn_obj           => "bn-c64xplus.o c64xplus-gf2m.o",
-aes_obj          => "aes-c64xplus.o aes_cbc.o aes_ctr.o",
-sha1_obj         => "sha1-c64xplus.o sha256-c64xplus.o sha512-c64xplus.o",
-rc4_obj          => "rc4-c64xplus.o",
-modes_obj        => "ghash-c64xplus.o",
+cpuid_asm_src    => "c64xpluscpuid.s",
+bn_asm_src       => "asm/bn-c64xplus.asm c64xplus-gf2m.s",
+aes_asm_src      => "aes-c64xplus.s aes_cbc.c aes-ctr.fake",
+sha1_asm_src     => "sha1-c64xplus.s sha256-c64xplus.s sha512-c64xplus.s",
+rc4_asm_src      => "rc4-c64xplus.s",
+modes_asm_src    => "ghash-c64xplus.s",
+chacha_asm_src   => "chacha-c64xplus.s",
+poly1305_asm_src => "poly1305-c64xplus.s",
 perlasm_scheme   => "void",
 dso_scheme       => "dlfcn",
 shared_target    => "linux-shared",
crypto/chacha/asm/chacha-c64xplus.pl
0 → 100755
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# ChaCha20 for C64x+.
#
# October 2015
#
# Performance is 3.54 cycles per processed byte, which is ~4.3 times
# faster than code generated by the TI compiler. The compiler also
# disables interrupts for some reason, thus making interrupt response
# time dependent on input length. This module, on the other hand, is
# free from such limitation.
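
# The scheduled ADD/XOR/SWAP2/ROTL packets below compute the standard
# ChaCha20 quarter-round. A minimal plain-Perl reference of that round
# (an editor's illustration with an assumed name, not used by the
# generated code; SWAP2 is simply the hardware form of the 16-bit
# rotate) looks like this:
sub ref_quarter_round {
	my ($st,$a,$b,$c,$d) = @_;		# $st is a ref to the 16-word state
	my $rotl = sub { my ($v,$n)=@_; (($v<<$n)|($v>>(32-$n)))&0xffffffff };
	$st->[$a] = ($st->[$a]+$st->[$b])&0xffffffff; $st->[$d] = $rotl->($st->[$d]^$st->[$a],16);
	$st->[$c] = ($st->[$c]+$st->[$d])&0xffffffff; $st->[$b] = $rotl->($st->[$b]^$st->[$c],12);
	$st->[$a] = ($st->[$a]+$st->[$b])&0xffffffff; $st->[$d] = $rotl->($st->[$d]^$st->[$a],8);
	$st->[$c] = ($st->[$c]+$st->[$d])&0xffffffff; $st->[$b] = $rotl->($st->[$b]^$st->[$c],7);
}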
($OUT,$INP,$LEN,$KEYB,$COUNTERA)=("A4","B4","A6","B6","A8");
($KEYA,$COUNTERB,$STEP)=("A7","B7","A3");

@X=  ("A16","B16","A17","B17","A18","B18","A19","B19",
      "A20","B20","A21","B21","A22","B22","A23","B23");
@Y=  ("A24","B24","A25","B25","A26","B26","A27","B27",
      "A28","B28","A29","B29","A30","B30","A31","B31");
@DAT=("A6", "A7", "B6", "B7", "A8", "A9", "B8", "B9",
      "A10","A11","B10","B11","A12","A13","B12","B13");

# yes, overlaps with @DAT, used only in 2x interleave code path...
@K2x=("A6", "B6", "A7", "B7", "A8", "B8", "A9", "B9",
      "A10","B10","A11","B11","A2", "B2", "A13","B13");
$code.=<<___;
.text
.if .ASSEMBLER_VERSION<7000000
.asg 0,__TI_EABI__
.endif
.if __TI_EABI__
.asg ChaCha20_ctr32,_ChaCha20_ctr32
.endif
.asg B3,RA
.asg A15,FP
.asg B15,SP
.global _ChaCha20_ctr32
.align 32
_ChaCha20_ctr32:
.asmfunc stack_usage(40+64)
MV $LEN,A0 ; reassign
[!A0] BNOP RA ; no data
|| [A0] STW FP,*SP--(40+64) ; save frame pointer and alloca(40+64)
|| [A0] MV SP,FP
[A0] STDW B13:B12,*SP[4+8] ; ABI says so
|| [A0] MV $KEYB,$KEYA
|| [A0] MV $COUNTERA,$COUNTERB
[A0] STDW B11:B10,*SP[3+8]
|| [A0] STDW A13:A12,*FP[-3]
[A0] STDW A11:A10,*FP[-4]
|| [A0] MVK 128,$STEP ; 2 * input block size
[A0] LDW *${KEYA}[0],@Y[4] ; load key
|| [A0] LDW *${KEYB}[1],@Y[5]
|| [A0] MVK 0x00007865,@Y[0] ; synthesize sigma
|| [A0] MVK 0x0000646e,@Y[1]
[A0] LDW *${KEYA}[2],@Y[6]
|| [A0] LDW *${KEYB}[3],@Y[7]
|| [A0] MVKH 0x61700000,@Y[0]
|| [A0] MVKH 0x33200000,@Y[1]
LDW *${KEYA}[4],@Y[8]
|| LDW *${KEYB}[5],@Y[9]
|| MVK 0x00002d32,@Y[2]
|| MVK 0x00006574,@Y[3]
LDW *${KEYA}[6],@Y[10]
|| LDW *${KEYB}[7],@Y[11]
|| MVKH 0x79620000,@Y[2]
|| MVKH 0x6b200000,@Y[3]
LDW *${COUNTERA}[0],@Y[12] ; load counter||nonce
|| LDW *${COUNTERB}[1],@Y[13]
|| CMPLTU A0,$STEP,A1 ; is length < 2*blocks?
LDW *${COUNTERA}[2],@Y[14]
|| LDW *${COUNTERB}[3],@Y[15]
|| [A1] BNOP top1x?
[A1] MVK 64,$STEP ; input block size
|| MVK 10,B0 ; inner loop counter
DMV @Y[2],@Y[0],@X[2]:@X[0] ; copy block
|| DMV @Y[3],@Y[1],@X[3]:@X[1]
||[!A1] STDW @Y[2]:@Y[0],*FP[-12] ; offload key material to stack
||[!A1] STDW @Y[3]:@Y[1],*SP[2]
DMV @Y[6],@Y[4],@X[6]:@X[4]
|| DMV @Y[7],@Y[5],@X[7]:@X[5]
||[!A1] STDW @Y[6]:@Y[4],*FP[-10]
||[!A1] STDW @Y[7]:@Y[5],*SP[4]
DMV @Y[10],@Y[8],@X[10]:@X[8]
|| DMV @Y[11],@Y[9],@X[11]:@X[9]
||[!A1] STDW @Y[10]:@Y[8],*FP[-8]
||[!A1] STDW @Y[11]:@Y[9],*SP[6]
DMV @Y[14],@Y[12],@X[14]:@X[12]
|| DMV @Y[15],@Y[13],@X[15]:@X[13]
||[!A1] MV @Y[12],@K2x[12] ; counter
||[!A1] MV @Y[13],@K2x[13]
||[!A1] STW @Y[14],*FP[-6*2]
||[!A1] STW @Y[15],*SP[8*2]
___
{
################################################################
# 2x interleave gives 50% performance improvement
#
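# Editor's note: "interleaving" here means the loop advances two
# independent 64-byte states, @X and @Y, in lock-step. The two states
# have no data dependencies on each other, so their operations can share
# the 8-slot execute packets (the "||"-prefixed instructions below),
# which is where the gain over the 1x code path comes from.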
my ($a0,$a1,$a2,$a3) = (0..3);
my ($b0,$b1,$b2,$b3) = (4..7);
my ($c0,$c1,$c2,$c3) = (8..11);
my ($d0,$d1,$d2,$d3) = (12..15);
$code.=<<___;
outer2x?:
ADD @X[$b1],@X[$a1],@X[$a1]
|| ADD @X[$b2],@X[$a2],@X[$a2]
|| ADD @X[$b0],@X[$a0],@X[$a0]
|| ADD @X[$b3],@X[$a3],@X[$a3]
|| DMV @Y[2],@Y[0],@K2x[2]:@K2x[0]
|| DMV @Y[3],@Y[1],@K2x[3]:@K2x[1]
XOR @X[$a1],@X[$d1],@X[$d1]
|| XOR @X[$a2],@X[$d2],@X[$d2]
|| XOR @X[$a0],@X[$d0],@X[$d0]
|| XOR @X[$a3],@X[$d3],@X[$d3]
|| DMV @Y[6],@Y[4],@K2x[6]:@K2x[4]
|| DMV @Y[7],@Y[5],@K2x[7]:@K2x[5]
SWAP2 @X[$d1],@X[$d1] ; rotate by 16
|| SWAP2 @X[$d2],@X[$d2]
|| SWAP2 @X[$d0],@X[$d0]
|| SWAP2 @X[$d3],@X[$d3]
ADD @X[$d1],@X[$c1],@X[$c1]
|| ADD @X[$d2],@X[$c2],@X[$c2]
|| ADD @X[$d0],@X[$c0],@X[$c0]
|| ADD @X[$d3],@X[$c3],@X[$c3]
|| DMV @Y[10],@Y[8],@K2x[10]:@K2x[8]
|| DMV @Y[11],@Y[9],@K2x[11]:@K2x[9]
XOR @X[$c1],@X[$b1],@X[$b1]
|| XOR @X[$c2],@X[$b2],@X[$b2]
|| XOR @X[$c0],@X[$b0],@X[$b0]
|| XOR @X[$c3],@X[$b3],@X[$b3]
|| ADD 1,@Y[12],@Y[12] ; adjust counter for 2nd block
ROTL @X[$b1],12,@X[$b1]
|| ROTL @X[$b2],12,@X[$b2]
|| MV @Y[14],@K2x[14]
|| MV @Y[15],@K2x[15]
top2x?:
ROTL @X[$b0],12,@X[$b0]
|| ROTL @X[$b3],12,@X[$b3]
|| ADD @Y[$b1],@Y[$a1],@Y[$a1]
|| ADD @Y[$b2],@Y[$a2],@Y[$a2]
ADD @Y[$b0],@Y[$a0],@Y[$a0]
|| ADD @Y[$b3],@Y[$a3],@Y[$a3]
|| ADD @X[$b1],@X[$a1],@X[$a1]
|| ADD @X[$b2],@X[$a2],@X[$a2]
|| XOR @Y[$a1],@Y[$d1],@Y[$d1]
|| XOR @Y[$a2],@Y[$d2],@Y[$d2]
XOR @Y[$a0],@Y[$d0],@Y[$d0]
|| XOR @Y[$a3],@Y[$d3],@Y[$d3]
|| ADD @X[$b0],@X[$a0],@X[$a0]
|| ADD @X[$b3],@X[$a3],@X[$a3]
|| XOR @X[$a1],@X[$d1],@X[$d1]
|| XOR @X[$a2],@X[$d2],@X[$d2]
XOR @X[$a0],@X[$d0],@X[$d0]
|| XOR @X[$a3],@X[$d3],@X[$d3]
|| ROTL @X[$d1],8,@X[$d1]
|| ROTL @X[$d2],8,@X[$d2]
|| SWAP2 @Y[$d1],@Y[$d1] ; rotate by 16
|| SWAP2 @Y[$d2],@Y[$d2]
|| SWAP2 @Y[$d0],@Y[$d0]
|| SWAP2 @Y[$d3],@Y[$d3]
ROTL @X[$d0],8,@X[$d0]
|| ROTL @X[$d3],8,@X[$d3]
|| ADD @Y[$d1],@Y[$c1],@Y[$c1]
|| ADD @Y[$d2],@Y[$c2],@Y[$c2]
|| ADD @Y[$d0],@Y[$c0],@Y[$c0]
|| ADD @Y[$d3],@Y[$c3],@Y[$c3]
|| BNOP middle2x1? ; protect from interrupt
ADD @X[$d1],@X[$c1],@X[$c1]
|| ADD @X[$d2],@X[$c2],@X[$c2]
|| XOR @Y[$c1],@Y[$b1],@Y[$b1]
|| XOR @Y[$c2],@Y[$b2],@Y[$b2]
|| XOR @Y[$c0],@Y[$b0],@Y[$b0]
|| XOR @Y[$c3],@Y[$b3],@Y[$b3]
ADD @X[$d0],@X[$c0],@X[$c0]
|| ADD @X[$d3],@X[$c3],@X[$c3]
|| XOR @X[$c1],@X[$b1],@X[$b1]
|| XOR @X[$c2],@X[$b2],@X[$b2]
|| ROTL @X[$d1],0,@X[$d2] ; moved to avoid cross-path stall
|| ROTL @X[$d2],0,@X[$d3]
XOR @X[$c0],@X[$b0],@X[$b0]
|| XOR @X[$c3],@X[$b3],@X[$b3]
|| MV @X[$d0],@X[$d1]
|| MV @X[$d3],@X[$d0]
|| ROTL @Y[$b1],12,@Y[$b1]
|| ROTL @Y[$b2],12,@Y[$b2]
ROTL @X[$b1],7,@X[$b0] ; avoided cross-path stall
|| ROTL @X[$b2],7,@X[$b1]
ROTL @X[$b0],7,@X[$b3]
|| ROTL @X[$b3],7,@X[$b2]
middle2x1?:
ROTL @Y[$b0],12,@Y[$b0]
|| ROTL @Y[$b3],12,@Y[$b3]
|| ADD @X[$b0],@X[$a0],@X[$a0]
|| ADD @X[$b1],@X[$a1],@X[$a1]
ADD @X[$b2],@X[$a2],@X[$a2]
|| ADD @X[$b3],@X[$a3],@X[$a3]
|| ADD @Y[$b1],@Y[$a1],@Y[$a1]
|| ADD @Y[$b2],@Y[$a2],@Y[$a2]
|| XOR @X[$a0],@X[$d0],@X[$d0]
|| XOR @X[$a1],@X[$d1],@X[$d1]
XOR @X[$a2],@X[$d2],@X[$d2]
|| XOR @X[$a3],@X[$d3],@X[$d3]
|| ADD @Y[$b0],@Y[$a0],@Y[$a0]
|| ADD @Y[$b3],@Y[$a3],@Y[$a3]
|| XOR @Y[$a1],@Y[$d1],@Y[$d1]
|| XOR @Y[$a2],@Y[$d2],@Y[$d2]
XOR @Y[$a0],@Y[$d0],@Y[$d0]
|| XOR @Y[$a3],@Y[$d3],@Y[$d3]
|| ROTL @Y[$d1],8,@Y[$d1]
|| ROTL @Y[$d2],8,@Y[$d2]
|| SWAP2 @X[$d0],@X[$d0] ; rotate by 16
|| SWAP2 @X[$d1],@X[$d1]
|| SWAP2 @X[$d2],@X[$d2]
|| SWAP2 @X[$d3],@X[$d3]
ROTL @Y[$d0],8,@Y[$d0]
|| ROTL @Y[$d3],8,@Y[$d3]
|| ADD @X[$d0],@X[$c2],@X[$c2]
|| ADD @X[$d1],@X[$c3],@X[$c3]
|| ADD @X[$d2],@X[$c0],@X[$c0]
|| ADD @X[$d3],@X[$c1],@X[$c1]
|| BNOP middle2x2? ; protect from interrupt
ADD @Y[$d1],@Y[$c1],@Y[$c1]
|| ADD @Y[$d2],@Y[$c2],@Y[$c2]
|| XOR @X[$c2],@X[$b0],@X[$b0]
|| XOR @X[$c3],@X[$b1],@X[$b1]
|| XOR @X[$c0],@X[$b2],@X[$b2]
|| XOR @X[$c1],@X[$b3],@X[$b3]
ADD @Y[$d0],@Y[$c0],@Y[$c0]
|| ADD @Y[$d3],@Y[$c3],@Y[$c3]
|| XOR @Y[$c1],@Y[$b1],@Y[$b1]
|| XOR @Y[$c2],@Y[$b2],@Y[$b2]
|| ROTL @Y[$d1],0,@Y[$d2] ; moved to avoid cross-path stall
|| ROTL @Y[$d2],0,@Y[$d3]
XOR @Y[$c0],@Y[$b0],@Y[$b0]
|| XOR @Y[$c3],@Y[$b3],@Y[$b3]
|| MV @Y[$d0],@Y[$d1]
|| MV @Y[$d3],@Y[$d0]
|| ROTL @X[$b0],12,@X[$b0]
|| ROTL @X[$b1],12,@X[$b1]
ROTL @Y[$b1],7,@Y[$b0] ; avoided cross-path stall
|| ROTL @Y[$b2],7,@Y[$b1]
ROTL @Y[$b0],7,@Y[$b3]
|| ROTL @Y[$b3],7,@Y[$b2]
middle2x2?:
ROTL @X[$b2],12,@X[$b2]
|| ROTL @X[$b3],12,@X[$b3]
|| ADD @Y[$b0],@Y[$a0],@Y[$a0]
|| ADD @Y[$b1],@Y[$a1],@Y[$a1]
ADD @Y[$b2],@Y[$a2],@Y[$a2]
|| ADD @Y[$b3],@Y[$a3],@Y[$a3]
|| ADD @X[$b0],@X[$a0],@X[$a0]
|| ADD @X[$b1],@X[$a1],@X[$a1]
|| XOR @Y[$a0],@Y[$d0],@Y[$d0]
|| XOR @Y[$a1],@Y[$d1],@Y[$d1]
XOR @Y[$a2],@Y[$d2],@Y[$d2]
|| XOR @Y[$a3],@Y[$d3],@Y[$d3]
|| ADD @X[$b2],@X[$a2],@X[$a2]
|| ADD @X[$b3],@X[$a3],@X[$a3]
|| XOR @X[$a0],@X[$d0],@X[$d0]
|| XOR @X[$a1],@X[$d1],@X[$d1]
XOR @X[$a2],@X[$d2],@X[$d2]
|| XOR @X[$a3],@X[$d3],@X[$d3]
|| ROTL @X[$d0],8,@X[$d0]
|| ROTL @X[$d1],8,@X[$d1]
|| SWAP2 @Y[$d0],@Y[$d0] ; rotate by 16
|| SWAP2 @Y[$d1],@Y[$d1]
|| SWAP2 @Y[$d2],@Y[$d2]
|| SWAP2 @Y[$d3],@Y[$d3]
ROTL @X[$d2],8,@X[$d2]
|| ROTL @X[$d3],8,@X[$d3]
|| ADD @Y[$d0],@Y[$c2],@Y[$c2]
|| ADD @Y[$d1],@Y[$c3],@Y[$c3]
|| ADD @Y[$d2],@Y[$c0],@Y[$c0]
|| ADD @Y[$d3],@Y[$c1],@Y[$c1]
|| BNOP bottom2x1? ; protect from interrupt
ADD @X[$d0],@X[$c2],@X[$c2]
|| ADD @X[$d1],@X[$c3],@X[$c3]
|| XOR @Y[$c2],@Y[$b0],@Y[$b0]
|| XOR @Y[$c3],@Y[$b1],@Y[$b1]
|| XOR @Y[$c0],@Y[$b2],@Y[$b2]
|| XOR @Y[$c1],@Y[$b3],@Y[$b3]
ADD @X[$d2],@X[$c0],@X[$c0]
|| ADD @X[$d3],@X[$c1],@X[$c1]
|| XOR @X[$c2],@X[$b0],@X[$b0]
|| XOR @X[$c3],@X[$b1],@X[$b1]
|| ROTL @X[$d0],0,@X[$d3] ; moved to avoid cross-path stall
|| ROTL @X[$d1],0,@X[$d0]
XOR @X[$c0],@X[$b2],@X[$b2]
|| XOR @X[$c1],@X[$b3],@X[$b3]
|| MV @X[$d2],@X[$d1]
|| MV @X[$d3],@X[$d2]
|| ROTL @Y[$b0],12,@Y[$b0]
|| ROTL @Y[$b1],12,@Y[$b1]
ROTL @X[$b0],7,@X[$b1] ; avoided cross-path stall
|| ROTL @X[$b1],7,@X[$b2]
ROTL @X[$b2],7,@X[$b3]
|| ROTL @X[$b3],7,@X[$b0]
|| [B0] SUB B0,1,B0 ; decrement inner loop counter
bottom2x1?:
ROTL @Y[$b2],12,@Y[$b2]
|| ROTL @Y[$b3],12,@Y[$b3]
|| [B0] ADD @X[$b1],@X[$a1],@X[$a1] ; modulo-scheduled
|| [B0] ADD @X[$b2],@X[$a2],@X[$a2]
[B0] ADD @X[$b0],@X[$a0],@X[$a0]
|| [B0] ADD @X[$b3],@X[$a3],@X[$a3]
|| ADD @Y[$b0],@Y[$a0],@Y[$a0]
|| ADD @Y[$b1],@Y[$a1],@Y[$a1]
|| [B0] XOR @X[$a1],@X[$d1],@X[$d1]
|| [B0] XOR @X[$a2],@X[$d2],@X[$d2]
[B0] XOR @X[$a0],@X[$d0],@X[$d0]
|| [B0] XOR @X[$a3],@X[$d3],@X[$d3]
|| ADD @Y[$b2],@Y[$a2],@Y[$a2]
|| ADD @Y[$b3],@Y[$a3],@Y[$a3]
|| XOR @Y[$a0],@Y[$d0],@Y[$d0]
|| XOR @Y[$a1],@Y[$d1],@Y[$d1]
XOR @Y[$a2],@Y[$d2],@Y[$d2]
|| XOR @Y[$a3],@Y[$d3],@Y[$d3]
|| ROTL @Y[$d0],8,@Y[$d0]
|| ROTL @Y[$d1],8,@Y[$d1]
|| [B0] SWAP2 @X[$d1],@X[$d1] ; rotate by 16
|| [B0] SWAP2 @X[$d2],@X[$d2]
|| [B0] SWAP2 @X[$d0],@X[$d0]
|| [B0] SWAP2 @X[$d3],@X[$d3]
ROTL @Y[$d2],8,@Y[$d2]
|| ROTL @Y[$d3],8,@Y[$d3]
|| [B0] ADD @X[$d1],@X[$c1],@X[$c1]
|| [B0] ADD @X[$d2],@X[$c2],@X[$c2]
|| [B0] ADD @X[$d0],@X[$c0],@X[$c0]
|| [B0] ADD @X[$d3],@X[$c3],@X[$c3]
|| [B0] BNOP top2x? ; even protects from interrupt
ADD @Y[$d0],@Y[$c2],@Y[$c2]
|| ADD @Y[$d1],@Y[$c3],@Y[$c3]
|| [B0] XOR @X[$c1],@X[$b1],@X[$b1]
|| [B0] XOR @X[$c2],@X[$b2],@X[$b2]
|| [B0] XOR @X[$c0],@X[$b0],@X[$b0]
|| [B0] XOR @X[$c3],@X[$b3],@X[$b3]
ADD @Y[$d2],@Y[$c0],@Y[$c0]
|| ADD @Y[$d3],@Y[$c1],@Y[$c1]
|| XOR @Y[$c2],@Y[$b0],@Y[$b0]
|| XOR @Y[$c3],@Y[$b1],@Y[$b1]
|| ROTL @Y[$d0],0,@Y[$d3] ; moved to avoid cross-path stall
|| ROTL @Y[$d1],0,@Y[$d0]
XOR @Y[$c0],@Y[$b2],@Y[$b2]
|| XOR @Y[$c1],@Y[$b3],@Y[$b3]
|| MV @Y[$d2],@Y[$d1]
|| MV @Y[$d3],@Y[$d2]
|| [B0] ROTL @X[$b1],12,@X[$b1]
|| [B0] ROTL @X[$b2],12,@X[$b2]
ROTL @Y[$b0],7,@Y[$b1] ; avoided cross-path stall
|| ROTL @Y[$b1],7,@Y[$b2]
ROTL @Y[$b2],7,@Y[$b3]
|| ROTL @Y[$b3],7,@Y[$b0]
bottom2x2?:
___
}
$code.=<<___;
ADD @K2x[0],@X[0],@X[0] ; accumulate key material
|| ADD @K2x[1],@X[1],@X[1]
|| ADD @K2x[2],@X[2],@X[2]
|| ADD @K2x[3],@X[3],@X[3]
ADD @K2x[0],@Y[0],@Y[0]
|| ADD @K2x[1],@Y[1],@Y[1]
|| ADD @K2x[2],@Y[2],@Y[2]
|| ADD @K2x[3],@Y[3],@Y[3]
|| LDNDW *${INP}++[8],@DAT[1]:@DAT[0]
ADD @K2x[4],@X[4],@X[4]
|| ADD @K2x[5],@X[5],@X[5]
|| ADD @K2x[6],@X[6],@X[6]
|| ADD @K2x[7],@X[7],@X[7]
|| LDNDW *${INP}[-7],@DAT[3]:@DAT[2]
ADD @K2x[4],@Y[4],@Y[4]
|| ADD @K2x[5],@Y[5],@Y[5]
|| ADD @K2x[6],@Y[6],@Y[6]
|| ADD @K2x[7],@Y[7],@Y[7]
|| LDNDW *${INP}[-6],@DAT[5]:@DAT[4]
ADD @K2x[8],@X[8],@X[8]
|| ADD @K2x[9],@X[9],@X[9]
|| ADD @K2x[10],@X[10],@X[10]
|| ADD @K2x[11],@X[11],@X[11]
|| LDNDW *${INP}[-5],@DAT[7]:@DAT[6]
ADD @K2x[8],@Y[8],@Y[8]
|| ADD @K2x[9],@Y[9],@Y[9]
|| ADD @K2x[10],@Y[10],@Y[10]
|| ADD @K2x[11],@Y[11],@Y[11]
|| LDNDW *${INP}[-4],@DAT[9]:@DAT[8]
ADD @K2x[12],@X[12],@X[12]
|| ADD @K2x[13],@X[13],@X[13]
|| ADD @K2x[14],@X[14],@X[14]
|| ADD @K2x[15],@X[15],@X[15]
|| LDNDW *${INP}[-3],@DAT[11]:@DAT[10]
ADD @K2x[12],@Y[12],@Y[12]
|| ADD @K2x[13],@Y[13],@Y[13]
|| ADD @K2x[14],@Y[14],@Y[14]
|| ADD @K2x[15],@Y[15],@Y[15]
|| LDNDW *${INP}[-2],@DAT[13]:@DAT[12]
ADD 1,@Y[12],@Y[12] ; adjust counter for 2nd block
|| ADD 2,@K2x[12],@K2x[12] ; increment counter
|| LDNDW *${INP}[-1],@DAT[15]:@DAT[14]
.if .BIG_ENDIAN
SWAP2 @X[0],@X[0]
|| SWAP2 @X[1],@X[1]
|| SWAP2 @X[2],@X[2]
|| SWAP2 @X[3],@X[3]
SWAP2 @X[4],@X[4]
|| SWAP2 @X[5],@X[5]
|| SWAP2 @X[6],@X[6]
|| SWAP2 @X[7],@X[7]
SWAP2 @X[8],@X[8]
|| SWAP2 @X[9],@X[9]
|| SWAP4 @X[0],@X[1]
|| SWAP4 @X[1],@X[0]
SWAP2 @X[10],@X[10]
|| SWAP2 @X[11],@X[11]
|| SWAP4 @X[2],@X[3]
|| SWAP4 @X[3],@X[2]
SWAP2 @X[12],@X[12]
|| SWAP2 @X[13],@X[13]
|| SWAP4 @X[4],@X[5]
|| SWAP4 @X[5],@X[4]
SWAP2 @X[14],@X[14]
|| SWAP2 @X[15],@X[15]
|| SWAP4 @X[6],@X[7]
|| SWAP4 @X[7],@X[6]
SWAP4 @X[8],@X[9]
|| SWAP4 @X[9],@X[8]
|| SWAP2 @Y[0],@Y[0]
|| SWAP2 @Y[1],@Y[1]
SWAP4 @X[10],@X[11]
|| SWAP4 @X[11],@X[10]
|| SWAP2 @Y[2],@Y[2]
|| SWAP2 @Y[3],@Y[3]
SWAP4 @X[12],@X[13]
|| SWAP4 @X[13],@X[12]
|| SWAP2 @Y[4],@Y[4]
|| SWAP2 @Y[5],@Y[5]
SWAP4 @X[14],@X[15]
|| SWAP4 @X[15],@X[14]
|| SWAP2 @Y[6],@Y[6]
|| SWAP2 @Y[7],@Y[7]
SWAP2 @Y[8],@Y[8]
|| SWAP2 @Y[9],@Y[9]
|| SWAP4 @Y[0],@Y[1]
|| SWAP4 @Y[1],@Y[0]
SWAP2 @Y[10],@Y[10]
|| SWAP2 @Y[11],@Y[11]
|| SWAP4 @Y[2],@Y[3]
|| SWAP4 @Y[3],@Y[2]
SWAP2 @Y[12],@Y[12]
|| SWAP2 @Y[13],@Y[13]
|| SWAP4 @Y[4],@Y[5]
|| SWAP4 @Y[5],@Y[4]
SWAP2 @Y[14],@Y[14]
|| SWAP2 @Y[15],@Y[15]
|| SWAP4 @Y[6],@Y[7]
|| SWAP4 @Y[7],@Y[6]
SWAP4 @Y[8],@Y[9]
|| SWAP4 @Y[9],@Y[8]
SWAP4 @Y[10],@Y[11]
|| SWAP4 @Y[11],@Y[10]
SWAP4 @Y[12],@Y[13]
|| SWAP4 @Y[13],@Y[12]
SWAP4 @Y[14],@Y[15]
|| SWAP4 @Y[15],@Y[14]
.endif
XOR @DAT[0],@X[0],@X[0] ; xor 1st block
|| XOR @DAT[3],@X[3],@X[3]
|| XOR @DAT[2],@X[2],@X[1]
|| XOR @DAT[1],@X[1],@X[2]
|| LDNDW *${INP}++[8],@DAT[1]:@DAT[0]
XOR @DAT[4],@X[4],@X[4]
|| XOR @DAT[7],@X[7],@X[7]
|| LDNDW *${INP}[-7],@DAT[3]:@DAT[2]
XOR @DAT[6],@X[6],@X[5]
|| XOR @DAT[5],@X[5],@X[6]
|| LDNDW *${INP}[-6],@DAT[5]:@DAT[4]
XOR @DAT[8],@X[8],@X[8]
|| XOR @DAT[11],@X[11],@X[11]
|| LDNDW *${INP}[-5],@DAT[7]:@DAT[6]
XOR @DAT[10],@X[10],@X[9]
|| XOR @DAT[9],@X[9],@X[10]
|| LDNDW *${INP}[-4],@DAT[9]:@DAT[8]
XOR @DAT[12],@X[12],@X[12]
|| XOR @DAT[15],@X[15],@X[15]
|| LDNDW *${INP}[-3],@DAT[11]:@DAT[10]
XOR @DAT[14],@X[14],@X[13]
|| XOR @DAT[13],@X[13],@X[14]
|| LDNDW *${INP}[-2],@DAT[13]:@DAT[12]
[A0] SUB A0,$STEP,A0 ; SUB A0,128,A0
|| LDNDW *${INP}[-1],@DAT[15]:@DAT[14]
XOR @Y[0],@DAT[0],@DAT[0] ; xor 2nd block
|| XOR @Y[1],@DAT[1],@DAT[1]
|| STNDW @X[2]:@X[0],*${OUT}++[8]
XOR @Y[2],@DAT[2],@DAT[2]
|| XOR @Y[3],@DAT[3],@DAT[3]
|| STNDW @X[3]:@X[1],*${OUT}[-7]
XOR @Y[4],@DAT[4],@DAT[4]
|| [A0] LDDW *FP[-12],@X[2]:@X[0] ; re-load key material from stack
|| [A0] LDDW *SP[2], @X[3]:@X[1]
XOR @Y[5],@DAT[5],@DAT[5]
|| STNDW @X[6]:@X[4],*${OUT}[-6]
XOR @Y[6],@DAT[6],@DAT[6]
|| XOR @Y[7],@DAT[7],@DAT[7]
|| STNDW @X[7]:@X[5],*${OUT}[-5]
XOR @Y[8],@DAT[8],@DAT[8]
|| [A0] LDDW *FP[-10],@X[6]:@X[4]
|| [A0] LDDW *SP[4], @X[7]:@X[5]
XOR @Y[9],@DAT[9],@DAT[9]
|| STNDW @X[10]:@X[8],*${OUT}[-4]
XOR @Y[10],@DAT[10],@DAT[10]
|| XOR @Y[11],@DAT[11],@DAT[11]
|| STNDW @X[11]:@X[9],*${OUT}[-3]
XOR @Y[12],@DAT[12],@DAT[12]
|| [A0] LDDW *FP[-8], @X[10]:@X[8]
|| [A0] LDDW *SP[6], @X[11]:@X[9]
XOR @Y[13],@DAT[13],@DAT[13]
|| STNDW @X[14]:@X[12],*${OUT}[-2]
XOR @Y[14],@DAT[14],@DAT[14]
|| XOR @Y[15],@DAT[15],@DAT[15]
|| STNDW @X[15]:@X[13],*${OUT}[-1]
[A0] MV @K2x[12],@X[12]
|| [A0] MV @K2x[13],@X[13]
|| [A0] LDW *FP[-6*2], @X[14]
|| [A0] LDW *SP[8*2], @X[15]
[A0] DMV @X[2],@X[0],@Y[2]:@Y[0] ; duplicate key material
|| STNDW @DAT[1]:@DAT[0],*${OUT}++[8]
[A0] DMV @X[3],@X[1],@Y[3]:@Y[1]
|| STNDW @DAT[3]:@DAT[2],*${OUT}[-7]
[A0] DMV @X[6],@X[4],@Y[6]:@Y[4]
|| STNDW @DAT[5]:@DAT[4],*${OUT}[-6]
|| CMPLTU A0,$STEP,A1 ; is remaining length < 2*blocks?
||[!A0] BNOP epilogue?
[A0] DMV @X[7],@X[5],@Y[7]:@Y[5]
|| STNDW @DAT[7]:@DAT[6],*${OUT}[-5]
||[!A1] BNOP outer2x?
[A0] DMV @X[10],@X[8],@Y[10]:@Y[8]
|| STNDW @DAT[9]:@DAT[8],*${OUT}[-4]
[A0] DMV @X[11],@X[9],@Y[11]:@Y[9]
|| STNDW @DAT[11]:@DAT[10],*${OUT}[-3]
[A0] DMV @X[14],@X[12],@Y[14]:@Y[12]
|| STNDW @DAT[13]:@DAT[12],*${OUT}[-2]
[A0] DMV @X[15],@X[13],@Y[15]:@Y[13]
|| STNDW @DAT[15]:@DAT[14],*${OUT}[-1]
;;===== branch to epilogue? is taken here
[A1] MVK 64,$STEP
|| [A0] MVK 10,B0 ; inner loop counter
;;===== branch to outer2x? is taken here
___
{
my ($a0,$a1,$a2,$a3) = (0..3);
my ($b0,$b1,$b2,$b3) = (4..7);
my ($c0,$c1,$c2,$c3) = (8..11);
my ($d0,$d1,$d2,$d3) = (12..15);
$code.=<<___;
top1x?:
ADD @X[$b1],@X[$a1],@X[$a1]
|| ADD @X[$b2],@X[$a2],@X[$a2]
ADD @X[$b0],@X[$a0],@X[$a0]
|| ADD @X[$b3],@X[$a3],@X[$a3]
|| XOR @X[$a1],@X[$d1],@X[$d1]
|| XOR @X[$a2],@X[$d2],@X[$d2]
XOR @X[$a0],@X[$d0],@X[$d0]
|| XOR @X[$a3],@X[$d3],@X[$d3]
|| SWAP2 @X[$d1],@X[$d1] ; rotate by 16
|| SWAP2 @X[$d2],@X[$d2]
SWAP2 @X[$d0],@X[$d0]
|| SWAP2 @X[$d3],@X[$d3]
|| ADD @X[$d1],@X[$c1],@X[$c1]
|| ADD @X[$d2],@X[$c2],@X[$c2]
ADD @X[$d0],@X[$c0],@X[$c0]
|| ADD @X[$d3],@X[$c3],@X[$c3]
|| XOR @X[$c1],@X[$b1],@X[$b1]
|| XOR @X[$c2],@X[$b2],@X[$b2]
XOR @X[$c0],@X[$b0],@X[$b0]
|| XOR @X[$c3],@X[$b3],@X[$b3]
|| ROTL @X[$b1],12,@X[$b1]
|| ROTL @X[$b2],12,@X[$b2]
ROTL @X[$b0],12,@X[$b0]
|| ROTL @X[$b3],12,@X[$b3]
ADD @X[$b1],@X[$a1],@X[$a1]
|| ADD @X[$b2],@X[$a2],@X[$a2]
ADD @X[$b0],@X[$a0],@X[$a0]
|| ADD @X[$b3],@X[$a3],@X[$a3]
|| XOR @X[$a1],@X[$d1],@X[$d1]
|| XOR @X[$a2],@X[$d2],@X[$d2]
XOR @X[$a0],@X[$d0],@X[$d0]
|| XOR @X[$a3],@X[$d3],@X[$d3]
|| ROTL @X[$d1],8,@X[$d1]
|| ROTL @X[$d2],8,@X[$d2]
ROTL @X[$d0],8,@X[$d0]
|| ROTL @X[$d3],8,@X[$d3]
|| BNOP middle1x? ; protect from interrupt
ADD @X[$d1],@X[$c1],@X[$c1]
|| ADD @X[$d2],@X[$c2],@X[$c2]
ADD @X[$d0],@X[$c0],@X[$c0]
|| ADD @X[$d3],@X[$c3],@X[$c3]
|| XOR @X[$c1],@X[$b1],@X[$b1]
|| XOR @X[$c2],@X[$b2],@X[$b2]
|| ROTL @X[$d1],0,@X[$d2] ; moved to avoid cross-path stall
|| ROTL @X[$d2],0,@X[$d3]
XOR @X[$c0],@X[$b0],@X[$b0]
|| XOR @X[$c3],@X[$b3],@X[$b3]
|| ROTL @X[$d0],0,@X[$d1]
|| ROTL @X[$d3],0,@X[$d0]
ROTL @X[$b1],7,@X[$b0] ; avoided cross-path stall
|| ROTL @X[$b2],7,@X[$b1]
ROTL @X[$b0],7,@X[$b3]
|| ROTL @X[$b3],7,@X[$b2]
middle1x?:
ADD @X[$b0],@X[$a0],@X[$a0]
|| ADD @X[$b1],@X[$a1],@X[$a1]
ADD @X[$b2],@X[$a2],@X[$a2]
|| ADD @X[$b3],@X[$a3],@X[$a3]
|| XOR @X[$a0],@X[$d0],@X[$d0]
|| XOR @X[$a1],@X[$d1],@X[$d1]
XOR @X[$a2],@X[$d2],@X[$d2]
|| XOR @X[$a3],@X[$d3],@X[$d3]
|| SWAP2 @X[$d0],@X[$d0] ; rotate by 16
|| SWAP2 @X[$d1],@X[$d1]
SWAP2 @X[$d2],@X[$d2]
|| SWAP2 @X[$d3],@X[$d3]
|| ADD @X[$d0],@X[$c2],@X[$c2]
|| ADD @X[$d1],@X[$c3],@X[$c3]
ADD @X[$d2],@X[$c0],@X[$c0]
|| ADD @X[$d3],@X[$c1],@X[$c1]
|| XOR @X[$c2],@X[$b0],@X[$b0]
|| XOR @X[$c3],@X[$b1],@X[$b1]
XOR @X[$c0],@X[$b2],@X[$b2]
|| XOR @X[$c1],@X[$b3],@X[$b3]
|| ROTL @X[$b0],12,@X[$b0]
|| ROTL @X[$b1],12,@X[$b1]
ROTL @X[$b2],12,@X[$b2]
|| ROTL @X[$b3],12,@X[$b3]
ADD @X[$b0],@X[$a0],@X[$a0]
|| ADD @X[$b1],@X[$a1],@X[$a1]
|| [B0] SUB B0,1,B0 ; decrement inner loop counter
ADD @X[$b2],@X[$a2],@X[$a2]
|| ADD @X[$b3],@X[$a3],@X[$a3]
|| XOR @X[$a0],@X[$d0],@X[$d0]
|| XOR @X[$a1],@X[$d1],@X[$d1]
XOR @X[$a2],@X[$d2],@X[$d2]
|| XOR @X[$a3],@X[$d3],@X[$d3]
|| ROTL @X[$d0],8,@X[$d0]
|| ROTL @X[$d1],8,@X[$d1]
ROTL @X[$d2],8,@X[$d2]
|| ROTL @X[$d3],8,@X[$d3]
|| [B0] BNOP top1x? ; even protects from interrupt
ADD @X[$d0],@X[$c2],@X[$c2]
|| ADD @X[$d1],@X[$c3],@X[$c3]
ADD @X[$d2],@X[$c0],@X[$c0]
|| ADD @X[$d3],@X[$c1],@X[$c1]
|| XOR @X[$c2],@X[$b0],@X[$b0]
|| XOR @X[$c3],@X[$b1],@X[$b1]
|| ROTL @X[$d0],0,@X[$d3] ; moved to avoid cross-path stall
|| ROTL @X[$d1],0,@X[$d0]
XOR @X[$c0],@X[$b2],@X[$b2]
|| XOR @X[$c1],@X[$b3],@X[$b3]
|| ROTL @X[$d2],0,@X[$d1]
|| ROTL @X[$d3],0,@X[$d2]
ROTL @X[$b0],7,@X[$b1] ; avoided cross-path stall
|| ROTL @X[$b1],7,@X[$b2]
ROTL @X[$b2],7,@X[$b3]
|| ROTL @X[$b3],7,@X[$b0]
||[!B0] CMPLTU A0,$STEP,A1 ; less than 64 bytes left?
bottom1x?:
___
}
$code.=<<___;
ADD @Y[0],@X[0],@X[0] ; accumulate key material
|| ADD @Y[1],@X[1],@X[1]
|| ADD @Y[2],@X[2],@X[2]
|| ADD @Y[3],@X[3],@X[3]
||[!A1] LDNDW *${INP}++[8],@DAT[1]:@DAT[0]
|| [A1] BNOP tail?
ADD @Y[4],@X[4],@X[4]
|| ADD @Y[5],@X[5],@X[5]
|| ADD @Y[6],@X[6],@X[6]
|| ADD @Y[7],@X[7],@X[7]
||[!A1] LDNDW *${INP}[-7],@DAT[3]:@DAT[2]
ADD @Y[8],@X[8],@X[8]
|| ADD @Y[9],@X[9],@X[9]
|| ADD @Y[10],@X[10],@X[10]
|| ADD @Y[11],@X[11],@X[11]
||[!A1] LDNDW *${INP}[-6],@DAT[5]:@DAT[4]
ADD @Y[12],@X[12],@X[12]
|| ADD @Y[13],@X[13],@X[13]
|| ADD @Y[14],@X[14],@X[14]
|| ADD @Y[15],@X[15],@X[15]
||[!A1] LDNDW *${INP}[-5],@DAT[7]:@DAT[6]
[!A1] LDNDW *${INP}[-4],@DAT[9]:@DAT[8]
[!A1] LDNDW *${INP}[-3],@DAT[11]:@DAT[10]
LDNDW *${INP}[-2],@DAT[13]:@DAT[12]
LDNDW *${INP}[-1],@DAT[15]:@DAT[14]
.if .BIG_ENDIAN
SWAP2 @X[0],@X[0]
|| SWAP2 @X[1],@X[1]
|| SWAP2 @X[2],@X[2]
|| SWAP2 @X[3],@X[3]
SWAP2 @X[4],@X[4]
|| SWAP2 @X[5],@X[5]
|| SWAP2 @X[6],@X[6]
|| SWAP2 @X[7],@X[7]
SWAP2 @X[8],@X[8]
|| SWAP2 @X[9],@X[9]
|| SWAP4 @X[0],@X[1]
|| SWAP4 @X[1],@X[0]
SWAP2 @X[10],@X[10]
|| SWAP2 @X[11],@X[11]
|| SWAP4 @X[2],@X[3]
|| SWAP4 @X[3],@X[2]
SWAP2 @X[12],@X[12]
|| SWAP2 @X[13],@X[13]
|| SWAP4 @X[4],@X[5]
|| SWAP4 @X[5],@X[4]
SWAP2 @X[14],@X[14]
|| SWAP2 @X[15],@X[15]
|| SWAP4 @X[6],@X[7]
|| SWAP4 @X[7],@X[6]
SWAP4 @X[8],@X[9]
|| SWAP4 @X[9],@X[8]
SWAP4 @X[10],@X[11]
|| SWAP4 @X[11],@X[10]
SWAP4 @X[12],@X[13]
|| SWAP4 @X[13],@X[12]
SWAP4 @X[14],@X[15]
|| SWAP4 @X[15],@X[14]
.else
NOP 1
.endif
XOR @X[0],@DAT[0],@DAT[0] ; xor with input
|| XOR @X[1],@DAT[1],@DAT[1]
|| XOR @X[2],@DAT[2],@DAT[2]
|| XOR @X[3],@DAT[3],@DAT[3]
|| [A0] SUB A0,$STEP,A0 ; SUB A0,64,A0
XOR @X[4],@DAT[4],@DAT[4]
|| XOR @X[5],@DAT[5],@DAT[5]
|| XOR @X[6],@DAT[6],@DAT[6]
|| XOR @X[7],@DAT[7],@DAT[7]
|| STNDW @DAT[1]:@DAT[0],*${OUT}++[8]
XOR @X[8],@DAT[8],@DAT[8]
|| XOR @X[9],@DAT[9],@DAT[9]
|| XOR @X[10],@DAT[10],@DAT[10]
|| XOR @X[11],@DAT[11],@DAT[11]
|| STNDW @DAT[3]:@DAT[2],*${OUT}[-7]
XOR @X[12],@DAT[12],@DAT[12]
|| XOR @X[13],@DAT[13],@DAT[13]
|| XOR @X[14],@DAT[14],@DAT[14]
|| XOR @X[15],@DAT[15],@DAT[15]
|| STNDW @DAT[5]:@DAT[4],*${OUT}[-6]
|| [A0] BNOP top1x?
[A0] DMV @Y[2],@Y[0],@X[2]:@X[0] ; duplicate key material
|| [A0] DMV @Y[3],@Y[1],@X[3]:@X[1]
|| STNDW @DAT[7]:@DAT[6],*${OUT}[-5]
[A0] DMV @Y[6],@Y[4],@X[6]:@X[4]
|| [A0] DMV @Y[7],@Y[5],@X[7]:@X[5]
|| STNDW @DAT[9]:@DAT[8],*${OUT}[-4]
[A0] DMV @Y[10],@Y[8],@X[10]:@X[8]
|| [A0] DMV @Y[11],@Y[9],@X[11]:@X[9]
|| [A0] ADD 1,@Y[12],@Y[12] ; increment counter
|| STNDW @DAT[11]:@DAT[10],*${OUT}[-3]
[A0] DMV @Y[14],@Y[12],@X[14]:@X[12]
|| [A0] DMV @Y[15],@Y[13],@X[15]:@X[13]
|| STNDW @DAT[13]:@DAT[12],*${OUT}[-2]
[A0] MVK 10,B0 ; inner loop counter
|| STNDW @DAT[15]:@DAT[14],*${OUT}[-1]
;;===== branch to top1x? is taken here
epilogue?:
LDDW *FP[-4],A11:A10 ; ABI says so
LDDW *FP[-3],A13:A12
|| LDDW *SP[3+8],B11:B10
LDDW *SP[4+8],B13:B12
|| BNOP RA
LDW *++SP(40+64),FP ; restore frame pointer
NOP 4
tail?:
LDBU *${INP}++[1],B24 ; load byte by byte
|| SUB A0,1,A0
|| SUB A0,1,B1
[!B1] BNOP epilogue? ; interrupts are disabled for whole time
|| [A0] LDBU *${INP}++[1],B24
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
[!B1] BNOP epilogue?
|| [A0] LDBU *${INP}++[1],B24
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
[!B1] BNOP epilogue?
|| ROTL @X[0],0,A24
|| [A0] LDBU *${INP}++[1],B24
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
[!B1] BNOP epilogue?
|| ROTL @X[0],24,A24
|| [A0] LDBU *${INP}++[1],A24
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
[!B1] BNOP epilogue?
|| ROTL @X[0],16,A24
|| [A0] LDBU *${INP}++[1],A24
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
|| XOR A24,B24,B25
STB B25,*${OUT}++[1] ; store byte by byte
||[!B1] BNOP epilogue?
|| ROTL @X[0],8,A24
|| [A0] LDBU *${INP}++[1],A24
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
|| XOR A24,B24,B25
STB B25,*${OUT}++[1]
___
sub TAIL_STEP {
my $Xi= shift;
my $T = ($Xi=~/^B/?"B24":"A24");	# match @X[i] to avoid cross path
my $D = $T; $D=~tr/AB/BA/;
my $O = $D; $O=~s/24/25/;

$code.=<<___;
||[!B1] BNOP epilogue?
|| ROTL $Xi,0,$T
|| [A0] LDBU *${INP}++[1],$D
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
|| XOR A24,B24,$O
STB $O,*${OUT}++[1]
||[!B1] BNOP epilogue?
|| ROTL $Xi,24,$T
|| [A0] LDBU *${INP}++[1],$T
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
|| XOR A24,B24,$O
STB $O,*${OUT}++[1]
||[!B1] BNOP epilogue?
|| ROTL $Xi,16,$T
|| [A0] LDBU *${INP}++[1],$T
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
|| XOR A24,B24,$O
STB $O,*${OUT}++[1]
||[!B1] BNOP epilogue?
|| ROTL $Xi,8,$T
|| [A0] LDBU *${INP}++[1],$T
|| [A0] SUB A0,1,A0
|| SUB B1,1,B1
|| XOR A24,B24,$O
STB $O,*${OUT}++[1]
___
}
foreach (1..14) { TAIL_STEP(@X[$_]); }
$code.=<<___;
||[!B1] BNOP epilogue?
|| ROTL @X[15],0,B24
|| XOR A24,B24,A25
STB A25,*${OUT}++[1]
|| ROTL @X[15],24,B24
|| XOR A24,B24,A25
STB A25,*${OUT}++[1]
|| ROTL @X[15],16,B24
|| XOR A24,B24,A25
STB A25,*${OUT}++[1]
|| XOR A24,B24,A25
STB A25,*${OUT}++[1]
|| XOR A24,B24,B25
STB B25,*${OUT}++[1]
.endasmfunc
.sect .const
.cstring "ChaCha20 for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
.align 4
___
print $code;
close STDOUT;
crypto/poly1305/asm/poly1305-c64xplus.pl
0 → 100755
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# Poly1305 hash for C64x+.
#
# October 2015
#
# Performance is [incredible for a 32-bit processor] 1.76 cycles per
# processed byte. Comparison to compiler-generated code is problematic,
# because results were observed to vary from 2.1 to 7.6 cpb depending
# on the compiler's ability to inline small functions. The compiler also
# disables interrupts for some reason, thus making interrupt response
# time dependent on input length. This module, on the other hand, is
# free from such limitation.
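
# The MPY32U/ADDU schedule below evaluates one step of the standard
# Poly1305 recurrence, h = ((h + m + padbit*2^128) * r) mod 2^130-5.
# A bignum reference of that step (an editor's illustration with assumed
# names, not used by the generated code) follows:
use Math::BigInt;
my $P1305 = Math::BigInt->new(2)->bpow(130)->bsub(5);	# the prime 2^130-5
sub ref_poly1305_block {
	my ($h,$r,$m,$padbit) = @_;	# Math::BigInt accumulator, key, 16-byte block
	my $t = $h->copy->badd($m)->badd(Math::BigInt->new($padbit)->blsft(128));
	return $t->bmul($r)->bmod($P1305);
}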
($CTXA,$INPB,$LEN,$PADBIT)=("A4","B4","A6","B6");
($H0,$H1,$H2,$H3,$H4,$H4a)=("A8","B8","A10","B10","B2",$LEN);
($D0,$D1,$D2,$D3)=("A9","B9","A11","B11");
($R0,$R1,$R2,$R3,$S1,$S2,$S3,$S3b)=("A0","B0","A1","B1","A12","B12","A13","B13");
($THREE,$R0b,$S2a)=("B7","B5","A5");
$code.=<<___;
.text
.if .ASSEMBLER_VERSION<7000000
.asg 0,__TI_EABI__
.endif
.if __TI_EABI__
.asg poly1305_init,_poly1305_init
.asg poly1305_blocks,_poly1305_blocks
.asg poly1305_emit,_poly1305_emit
.endif
.asg B3,RA
.asg A15,FP
.asg B15,SP
.if .LITTLE_ENDIAN
.asg MV,SWAP2
.asg MV.L,SWAP4
.endif
.global _poly1305_init
_poly1305_init:
.asmfunc
LDNDW *${INPB}[0],B17:B16 ; load key material
LDNDW *${INPB}[1],A17:A16
|| ZERO B9:B8
|| MVK -1,B0
STDW B9:B8,*${CTXA}[0] ; initialize h1:h0
|| SHRU B0,4,B0 ; 0x0fffffff
|| MVK -4,B1
STDW B9:B8,*${CTXA}[1] ; initialize h3:h2
|| AND B0,B1,B1 ; 0x0ffffffc
STW B8,*${CTXA}[4] ; initialize h4
.if .BIG_ENDIAN
SWAP2 B16,B17
|| SWAP2 B17,B16
SWAP2 A16,A17
|| SWAP2 A17,A16
SWAP4 B16,B16
|| SWAP4 A16,A16
SWAP4 B17,B17
|| SWAP4 A17,A17
.endif
AND B16,B0,B20 ; r0 = key[0] & 0x0fffffff
|| AND B17,B1,B22 ; r1 = key[1] & 0x0ffffffc
|| EXTU B17,4,6,B16 ; r1>>2
AND A16,B1,B21 ; r2 = key[2] & 0x0ffffffc
|| AND A17,B1,A23 ; r3 = key[3] & 0x0ffffffc
|| BNOP RA
SHRU B21,2,B18
|| ADD B22,B16,B16 ; s1 = r1 + r1>>2
STDW B21:B20,*${CTXA}[3] ; save r2:r0
|| ADD B21,B18,B18 ; s2 = r2 + r2>>2
|| SHRU A23,2,B17
|| MV A23,B23
STDW B23:B22,*${CTXA}[4] ; save r3:r1
|| ADD B23,B17,B19 ; s3 = r3 + r3>>2
|| ADD B23,B17,B17 ; s3 = r3 + r3>>2
STDW B17:B16,*${CTXA}[5] ; save s3:s1
STDW B19:B18,*${CTXA}[6] ; save s3:s2
|| ZERO A4 ; return 0
.endasmfunc
.global _poly1305_blocks
.align 32
_poly1305_blocks:
.asmfunc stack_usage(40)
SHRU $LEN,4,A2 ; A2 is loop counter, number of blocks
[!A2] BNOP RA ; no data
|| [A2] STW FP,*SP--(40) ; save frame pointer and alloca(40)
|| [A2] MV SP,FP
[A2] STDW B13:B12,*SP[4] ; ABI says so
|| [A2] MV $CTXA,$S3b ; borrow $S3b
[A2] STDW B11:B10,*SP[3]
|| [A2] STDW A13:A12,*FP[-3]
[A2] STDW A11:A10,*FP[-4]
|| [A2] LDDW *${S3b}[0],B25:B24 ; load h1:h0
[A2] LDNW *${INPB}++[4],$D0 ; load inp[0]
[A2] LDNW *${INPB}[-3],$D1 ; load inp[1]
LDDW *${CTXA}[1],B29:B28 ; load h3:h2, B28 is h2
LDNW *${INPB}[-2],$D2 ; load inp[2]
LDNW *${INPB}[-1],$D3 ; load inp[3]
LDDW *${CTXA}[3],$R2:$R0 ; load r2:r0
|| LDDW *${S3b}[4],$R3:$R1 ; load r3:r1
|| SWAP2 $D0,$D0
LDDW *${CTXA}[5],$S3:$S1 ; load s3:s1
|| LDDW *${S3b}[6],$S3b:$S2 ; load s3:s2
|| SWAP4 $D0,$D0
|| SWAP2 $D1,$D1
ADDU $D0,B24,$D0:$H0 ; h0+=inp[0]
|| ADD $D0,B24,B31 ; B-copy of h0+inp[0]
|| SWAP4 $D1,$D1
ADDU $D1,B25,$D1:$H1 ; h1+=inp[1]
|| MVK 3,$THREE
|| SWAP2 $D2,$D2
LDW *${CTXA}[4],$H4 ; load h4
|| SWAP4 $D2,$D2
|| MV B29,B30 ; B30 is h3
MV $R0,$R0b
loop?:
MPY32U $H0,$R0,A17:A16
|| MPY32U B31,$R1,B17:B16 ; MPY32U $H0,$R1,B17:B16
|| ADDU $D0,$D1:$H1,B25:B24 ; ADDU $D0,$D1:$H1,$D1:$H1
|| ADDU $D2,B28,$D2:$H2 ; h2+=inp[2]
|| SWAP2 $D3,$D3
MPY32U $H0,$R2,A19:A18
|| MPY32U B31,$R3,B19:B18 ; MPY32U $H0,$R3,B19:B18
|| ADD $D0,$H1,A24 ; A-copy of B24
|| SWAP4 $D3,$D3
|| [A2] SUB A2,1,A2 ; decrement loop counter
MPY32U A24,$S3,A21:A20 ; MPY32U $H1,$S3,A21:A20
|| MPY32U B24,$R0b,B21:B20 ; MPY32U $H1,$R0,B21:B20
|| ADDU B25,$D2:$H2,$D2:$H2 ; ADDU $D1,$D2:$H2,$D2:$H2
|| ADDU $D3,B30,$D3:$H3 ; h3+=inp[3]
|| ADD B25,$H2,B25 ; B-copy of $H2
MPY32U A24,$R1,A23:A22 ; MPY32U $H1,$R1,A23:A22
|| MPY32U B24,$R2,B23:B22 ; MPY32U $H1,$R2,B23:B22
MPY32U $H2,$S2,A25:A24
|| MPY32U B25,$S3b,B25:B24 ; MPY32U $H2,$S3,B25:B24
|| ADDU $D2,$D3:$H3,$D3:$H3
|| ADD $PADBIT,$H4,$H4 ; h4+=padbit
MPY32U $H2,$R0,A27:A26
|| MPY32U $H2,$R1,B27:B26
|| ADD $D3,$H4,$H4
|| MV $S2,$S2a
MPY32U $H3,$S1,A29:A28
|| MPY32U $H3,$S2,B29:B28
|| ADD A21,A17,A21 ; start accumulating "d3:d0"
|| ADD B21,B17,B21
|| ADDU A20,A16,A17:A16
|| ADDU B20,B16,B17:B16
|| [A2] LDNW *${INPB}++[4],$D0 ; load inp[0]
MPY32U $H3,$S3,A31:A30
|| MPY32U $H3,$R0b,B31:B30
|| ADD A23,A19,A23
|| ADD B23,B19,B23
|| ADDU A22,A18,A19:A18
|| ADDU B22,B18,B19:B18
|| [A2] LDNW *${INPB}[-3],$D1 ; load inp[1]
MPY32 $H4,$S1,B20
|| MPY32 $H4,$S2a,A20
|| ADD A25,A21,A21
|| ADD B25,B21,B21
|| ADDU A24,A17:A16,A17:A16
|| ADDU B24,B17:B16,B17:B16
|| [A2] LDNW *${INPB}[-2],$D2 ; load inp[2]
MPY32 $H4,$S3b,B22
|| ADD A27,A23,A23
|| ADD B27,B23,B23
|| ADDU A26,A19:A18,A19:A18
|| ADDU B26,B19:B18,B19:B18
|| [A2] LDNW *${INPB}[-1],$D3 ; load inp[3]
MPY32 $H4,$R0b,$H4
|| ADD A29,A21,A21 ; final hi("d0")
|| ADD B29,B21,B21 ; final hi("d1")
|| ADDU A28,A17:A16,A17:A16 ; final lo("d0")
|| ADDU B28,B17:B16,B17:B16
ADD A31,A23,A23 ; final hi("d2")
|| ADD B31,B23,B23 ; final hi("d3")
|| ADDU A30,A19:A18,A19:A18
|| ADDU B30,B19:B18,B19:B18
ADDU B20,B17:B16,B17:B16 ; final lo("d1")
|| ADDU A20,A19:A18,A19:A18 ; final lo("d2")
ADDU B22,B19:B18,B19:B18 ; final lo("d3")
|| ADD A17,A21,A21 ; "flatten" "d3:d0"
MV A19,B29 ; move to avoid cross-path stalls
ADDU A21,B17:B16,B27:B26 ; B26 is h1
ADD B21,B27,B27
|| DMV B29,A18,B29:B28 ; move to avoid cross-path stalls
ADDU B27,B29:B28,B29:B28 ; B28 is h2
|| [A2] SWAP2 $D0,$D0
ADD A23,B29,B29
|| [A2] SWAP4 $D0,$D0
ADDU B29,B19:B18,B31:B30 ; B30 is h3
ADD B23,B31,B31
|| MV A16,B24 ; B24 is h0
|| [A2] SWAP2 $D1,$D1
ADD B31,$H4,$H4
|| [A2] SWAP4 $D1,$D1
SHRU $H4,2,B16 ; last reduction step
|| AND $H4,$THREE,$H4
|| [A2] BNOP loop?
ADDAW B16,B16,B16 ; 5*(h4>>2)
ADDU B24,B16,B25:B24 ; B24 is h0
|| [A2] SWAP2 $D2,$D2
ADDU B26,B25,B27:B26 ; B26 is h1
|| [A2] SWAP4 $D2,$D2
ADDU B28,B27,B29:B28 ; B28 is h2
|| [A2] ADDU $D0,B24,$D0:$H0 ; h0+=inp[0]
|| [A2] ADD $D0,B24,B31 ; B-copy of h0+inp[0]
ADD B30,B29,B30 ; B30 is h3
|| [A2] ADDU $D1,B26,$D1:$H1 ; h1+=inp[1]
;;===== branch to loop? is taken here
LDDW *FP[-4],A11:A10 ; ABI says so
LDDW *FP[-3],A13:A12
|| LDDW *SP[3],B11:B10
LDDW *SP[4],B13:B12
|| MV B26,B25
|| BNOP RA
LDW *++SP(40),FP ; restore frame pointer
|| MV B30,B29
STDW B25:B24,*${CTXA}[0] ; save h1:h0
STDW B29:B28,*${CTXA}[1] ; save h3:h2
STW $H4,*${CTXA}[4] ; save h4
NOP 1
.endasmfunc
___
{
my ($MAC,$NONCEA,$NONCEB)=($INPB,$LEN,$PADBIT);
$code.=<<___;
.global _poly1305_emit
.align 32
_poly1305_emit:
.asmfunc
LDDW *${CTXA}[0],A17:A16 ; load h1:h0
LDDW *${CTXA}[1],A19:A18 ; load h3:h2
LDW *${CTXA}[4],A20 ; load h4
MV $NONCEA,$NONCEB
MVK 5,A22 ; compare to modulus
ADDU A16,A22,A23:A22
|| LDW *${NONCEA}[0],A8
|| LDW *${NONCEB}[1],B8
ADDU A17,A23,A25:A24
|| LDW *${NONCEA}[2],A9
|| LDW *${NONCEB}[3],B9
ADDU A19,A25,A27:A26
ADDU A19,A27,A29:A28
ADD A20,A29,A29
SHRU A29,2,A2 ; check for overflow in 130-th bit
[A2] MV A22,A16 ; select
|| [A2] MV A24,A17
[A2] MV A26,A18
|| [A2] MV A28,A19
|| ADDU A8,A16,A23:A22 ; accumulate nonce
ADDU B8,A17,A25:A24
|| SWAP2 A22,A22
ADDU A23,A25:A24,A25:A24
ADDU A9,A18,A27:A26
|| SWAP2 A24,A24
ADDU A25,A27:A26,A27:A26
|| ADD B9,A19,A28
ADD A27,A28,A28
|| SWAP2 A26,A26
.if .BIG_ENDIAN
SWAP2 A28,A28
|| SWAP4 A22,A22
|| SWAP4 A24,B24
SWAP4 A26,A26
SWAP4 A28,A28
|| MV B24,A24
.endif
BNOP RA,1
STNW A22,*${MAC}[0] ; write the result
STNW A24,*${MAC}[1]
STNW A26,*${MAC}[2]
STNW A28,*${MAC}[3]
.endasmfunc
___
}
$code.=<<___;
.sect .const
.cstring "Poly1305 for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
.align 4
___
print $code;