OpenHarmony / Third Party Openssl
Commit a1613840
Authored June 26, 2017 by Andy Polyakov

sha/asm/keccak1600-x86_64.pl: optimize by re-ordering instructions.

Reviewed-by: Richard Levitte <levitte@openssl.org>

Parent: a078d9df

1 changed file with 95 additions and 83 deletions:

crypto/sha/asm/keccak1600-x86_64.pl (+95, -83)
@@ -22,22 +22,33 @@
# instead of actually unrolling the loop pair-wise I simply flip
# pointers to T[][] and A[][] at the end of round. Since number of
# rounds is even, last round writes to A[][] and everything works out.
# How does it compare to assembly module in Keccak Code Package? KCP
# is faster on couple of processors, VIA Nano and Goldmont by 4-6%,
# otherwise this module is either as fast or faster by up to 15%...
#
########################################################################
# Numbers are cycles per processed byte out of large message.
#
#			r=1088
#			r=1088(*)
#
# P4 45.8
# Core 2 14.2
# Sandy Bridge 13.0
# Haswell 9.8
# P4 25.8
# Core 2 13.0
# Westmere 13.7
# Sandy Bridge 12.9(**)
# Haswell 9.7
# Skylake 9.4
# Silvermont 22.4
# Goldmont 18.0
# VIA Nano 19.1
# Sledgehammer 13.8
# Bulldozer 16.7
# Silvermont 22.8
# Goldmont 16.4
# VIA Nano 18.0
# Sledgehammer 13.3
# Bulldozer 16.5
#
# (*)	Corresponds to SHA3-256. Improvement over compiler-generated code
#	varies a lot, most common coefficient is 15% in comparison to
# gcc-5.x, 50% for gcc-4.x, 90% for gcc-3.x.
# (**) Sandy Bridge has broken rotate instruction. Performance can be
# improved by 14% by replacing rotates with double-precision
# shift with same register as source and destination.
$flavour = shift;
$output  = shift;
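The header comment above says the implementation skips pair-wise unrolling and instead flips the A[][] and T[][] pointers at the end of every round, relying on the even round count so the final result lands back in A[][]. A minimal C sketch of that scheme, with hypothetical names (keccak_round stands in for one round of the generated assembly; this is an illustration, not the module's actual interface):

#include <stdint.h>

/* One Keccak-1600 round reading from src[5][5] and writing the permuted
 * lanes to dst[5][5]; the assembly in this file is the real implementation. */
extern void keccak_round(uint64_t (*src)[5], uint64_t (*dst)[5], int i);

static void keccak_f1600(uint64_t A[5][5])
{
    uint64_t T[5][5];                          /* scratch state          */
    uint64_t (*src)[5] = A, (*dst)[5] = T, (*tmp)[5];

    for (int i = 0; i < 24; i++) {
        keccak_round(src, dst, i);             /* round i: src -> dst    */
        tmp = src; src = dst; dst = tmp;       /* flip pointers, no copy */
    }
    /* 24 rounds is an even number, so the last round wrote into A[][]. */
}

In the diff below, the `xchg %rsi,%rdi` in the last per-row hunk appears to be where that flip happens in the assembly.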
@@ -82,79 +93,78 @@ __KeccakF1600:
.align 32
.Loop:
xor $A[0][0](%rdi),@C[0]
xor $A[0][1](%rdi),@C[1]
mov $A[0][0](%rdi),@D[0]
mov $A[1][1](%rdi),@D[1]
mov $A[2][2](%rdi),@D[2]
mov $A[3][3](%rdi),@D[3]
xor $A[0][2](%rdi),@C[2]
xor $A[0][3](%rdi),@C[3]
xor @D[0], @C[0]
xor $A[0][1](%rdi),@C[1]
xor $A[1][2](%rdi),@C[2]
xor $A[1][0](%rdi),@C[0]
mov @C[4],@D[4]
xor $A[0][4](%rdi),@C[4]
xor $A[1][0](%rdi),@C[0]
xor $A[1][1](%rdi),@C[1]
xor $A[1][2](%rdi),@C[2]
xor $A[1][3](%rdi),@C[3]
xor $A[1][4](%rdi),@C[4]
xor @D[2], @C[2]
xor $A[2][0](%rdi),@C[0]
xor $A[2][1](%rdi),@C[1]
xor $A[2][2](%rdi),@C[2]
xor $A[2][3](%rdi),@C[3]
xor $A[2][4](%rdi),@C[4]
xor $A[1][3](%rdi),@C[3]
xor @D[1], @C[1]
xor $A[1][4](%rdi),@C[4]
xor $A[3][0](%rdi),@C[0]
xor $A[3][1](%rdi),@C[1]
xor $A[3][2](%rdi),@C[2]
xor $A[3][3](%rdi),@C[3]
xor $A[3][4](%rdi),@C[4]
xor $A[3][0](%rdi),@C[0]
xor $A[2][3](%rdi),@C[3]
xor $A[2][1](%rdi),@C[1]
xor $A[2][4](%rdi),@C[4]
mov @C[2],@T[0]
rol \$1,@C[2]
mov $A[0][0](%rdi),@D[0]
xor @C[0],@C[2] # D[1] = ROL64(C[2], 1) ^ C[0]
xor @D[3], @C[3]
rol \$1,@C[0]
mov $A[1][1](%rdi),@D[1]
xor @C[3],@C[0] # D[4] = ROL64(C[0], 1) ^ C[3]
xor $A[3][1](%rdi),@C[1]
rol \$1,@C[3]
mov $A[2][2](%rdi),@D[2]
xor @C[1],@C[3] # D[2] = ROL64(C[3], 1) ^ C[1]
xor $A[3][4](%rdi),@C[4]
rol \$1,@C[1]
mov $A[3][3](%rdi),@D[3]
xor @C[4],@C[1] # D[0] = ROL64(C[1], 1) ^ C[4]
rol \$1,@C[4]
mov $A[4][4](%rdi),@D[4]
xor @T[0],@C[4] # D[3] = ROL64(C[4], 1) ^ C[2]
___
my @E = @D;
@D = (@C[1],@C[2],@C[3],@C[4],@C[0]);
@C = @E;
$code.=<<___;
xor @D[0],@C[0]
xor @D[1],@C[1]
xor @D[2],@C[2]
rol \$$rhotates[1][1],@C[1]
xor @D[3],@C[3]
xor @D[4],@C[4]
rol \$$rhotates[1][1],@C[1]
rol \$$rhotates[2][2],@C[2]
xor @D[0],@C[0]
mov @C[1],@T[0]
rol \$$rhotates[3][3],@C[3]
or @C[2],@C[1]
xor @C[0],@C[1] # C[0] ^ ( C[1] | C[2])
rol \$$rhotates[4][4],@C[4]
mov @C[1],@T[0]
or @C[2],@C[1]
xor @C[0],@C[1] # C[0] ^ ( C[1] | C[2])
xor ($iotas),@C[1]
lea 8($iotas),$iotas
mov @C[1],$A[0][0](%rsi) # R[0][0] = C[0] ^ ( C[1] | C[2]) ^ iotas[i]
xor ($iotas),@C[1]
lea 8($iotas),$iotas
mov @C[4],@T[1]
and @C[3],@C[4]
mov @C[1],$A[0][0](%rsi) # R[0][0] = C[0] ^ ( C[1] | C[2]) ^ iotas[i]
xor @C[2],@C[4] # C[2] ^ ( C[4] & C[3])
not @C[2]
mov @C[4],$A[0][2](%rsi) # R[0][2] = C[2] ^ ( C[4] & C[3])
not @C[2]
or @C[3],@C[2]
xor @T[0],@C[2] # C[1] ^ (~C[2] | C[3])
mov @C[2],$A[0][1](%rsi) # R[0][1] = C[1] ^ (~C[2] | C[3])
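The hunk above covers the theta step and the first chi/iota outputs of the round; the re-ordering interleaves the loads, rotates and xors to break up dependency chains, and the values computed are unchanged. For reference, a hedged C transcription of what the inline comments (D[...] = ..., R[0][...] = ...) state, using a conventional ROL64 macro; the Perl register names are reused as plain arrays purely for illustration:

#include <stdint.h>

#define ROL64(x, n) (((x) << (n)) | ((x) >> (64 - (n))))   /* 0 < n < 64 */

/* Theta offsets exactly as the "D[x] = ROL64(C[y],1) ^ C[z]" comments read,
 * where C[0..4] are the five column parities of the state.               */
static void theta_d(const uint64_t C[5], uint64_t D[5])
{
    D[0] = ROL64(C[1], 1) ^ C[4];
    D[1] = ROL64(C[2], 1) ^ C[0];
    D[2] = ROL64(C[3], 1) ^ C[1];
    D[3] = ROL64(C[4], 1) ^ C[2];
    D[4] = ROL64(C[0], 1) ^ C[3];
}

/* Row 0 of chi plus the round constant, as the "R[0][x] = ..." comments
 * read; here C[0..4] stand for the already rotated lanes feeding row 0.  */
static void chi_iota_row0(const uint64_t C[5], uint64_t R0[5], uint64_t iota)
{
    R0[0] = C[0] ^ (C[1] | C[2]) ^ iota;
    R0[2] = C[2] ^ (C[4] & C[3]);
    R0[1] = C[1] ^ (~C[2] | C[3]);
    /* R[0][3] and R[0][4] are produced in the context elided above.      */
}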
@@ -169,34 +179,33 @@ $code.=<<___;
mov $A[0][3](%rdi),@C[0]
mov $A[4][2](%rdi),@C[4]
mov $A[3][1](%rdi),@C[3]
mov $A[1][4](%rdi),@C[1]
mov $A[2][0](%rdi),@C[2]
mov $A[3][1](%rdi),@C[3]
mov $A[4][2](%rdi),@C[4]
xor @D[3],@C[0]
xor @D[4],@C[1]
xor @D[0],@C[2]
xor @D[1],@C[3]
xor @D[2],@C[4]
rol \$$rhotates[0][3],@C[0]
xor @D[1],@C[3]
xor @D[4],@C[1]
rol \$$rhotates[4][2],@C[4]
rol \$$rhotates[3][1],@C[3]
xor @D[0],@C[2]
rol \$$rhotates[1][4],@C[1]
mov @C[0],@T[0]
or @C[4],@C[0]
rol \$$rhotates[2][0],@C[2]
rol \$$rhotates[3][1],@C[3]
rol \$$rhotates[4][2],@C[4]
mov @C[0],@T[0]
or @C[4],@C[0]
xor @C[3],@C[0] # C[3] ^ (C[0] | C[4])
mov @C[0],$A[1][3](%rsi) # R[1][3] = C[3] ^ (C[0] | C[4])
mov @C[1],@T[1]
and @T[0],@C[1]
xor @C[4],@C[1] # C[4] ^ (C[1] & C[0])
not @C[4]
mov @C[1],$A[1][4](%rsi) # R[1][4] = C[4] ^ (C[1] & C[0])
not @C[4]
or @C[3],@C[4]
xor @C[2],@C[4] # C[2] ^ (~C[4] | C[3])
mov @C[4],$A[1][2](%rsi) # R[1][2] = C[2] ^ (~C[4] | C[3])
@@ -210,31 +219,30 @@ $code.=<<___;
mov @T[1],$A[1][0](%rsi) # R[1][0] = C[0] ^ (C[1] | C[2])
mov $A[0][1](%rdi),@C[0]
mov $A[1][2](%rdi),@C[1]
mov $A[2][3](%rdi),@C[2]
mov $A[3][4](%rdi),@C[3]
mov $A[1][2](%rdi),@C[1]
mov $A[4][0](%rdi),@C[4]
mov $A[0][1](%rdi),@C[0]
xor @D[1],@C[0]
xor @D[2],@C[1]
xor @D[3],@C[2]
xor @D[4],@C[3]
xor @D[0],@C[4]
rol \$$rhotates[0][1],@C[0]
rol \$$rhotates[1][2],@C[1]
rol \$$rhotates[2][3],@C[2]
xor @D[2],@C[1]
rol \$$rhotates[3][4],@C[3]
xor @D[0],@C[4]
rol \$$rhotates[1][2],@C[1]
xor @D[1],@C[0]
rol \$$rhotates[4][0],@C[4]
mov @C[2],@T[0]
and @C[3],@C[2]
rol \$$rhotates[0][1],@C[0]
mov @C[2],@T[0]
and @C[3],@C[2]
not @C[3]
xor @C[1],@C[2] # C[1] ^ ( C[2] & C[3])
mov @C[2],$A[2][1](%rsi) # R[2][1] = C[1] ^ ( C[2] & C[3])
mov @C[4],@T[1]
not @C[3]
and @C[3],@C[4]
xor @T[0],@C[4] # C[2] ^ ( C[4] & ~C[3])
mov @C[4],$A[2][2](%rsi) # R[2][2] = C[2] ^ ( C[4] & ~C[3])
@@ -252,31 +260,30 @@ $code.=<<___;
mov @C[0],$A[2][3](%rsi) # R[2][3] = ~C[3] ^ ( C[0] | C[4])
mov $A[0][4](%rdi),@C[0]
mov $A[1][0](%rdi),@C[1]
mov $A[2][1](%rdi),@C[2]
mov $A[3][2](%rdi),@C[3]
mov $A[1][0](%rdi),@C[1]
mov $A[4][3](%rdi),@C[4]
mov $A[0][4](%rdi),@C[0]
xor @D[4],@C[0]
xor @D[0],@C[1]
xor @D[1],@C[2]
xor @D[2],@C[3]
xor @D[3],@C[4]
rol \$$rhotates[0][4],@C[0]
rol \$$rhotates[1][0],@C[1]
rol \$$rhotates[2][1],@C[2]
xor @D[0],@C[1]
rol \$$rhotates[3][2],@C[3]
xor @D[3],@C[4]
rol \$$rhotates[1][0],@C[1]
xor @D[4],@C[0]
rol \$$rhotates[4][3],@C[4]
mov @C[2],@T[0]
or @C[3],@C[2]
rol \$$rhotates[0][4],@C[0]
mov @C[2],@T[0]
or @C[3],@C[2]
not @C[3]
xor @C[1],@C[2] # C[1] ^ ( C[2] | C[3])
mov @C[2],$A[3][1](%rsi) # R[3][1] = C[1] ^ ( C[2] | C[3])
mov @C[4],@T[1]
not @C[3]
or @C[3],@C[4]
xor @T[0],@C[4] # C[2] ^ ( C[4] | ~C[3])
mov @C[4],$A[3][2](%rsi) # R[3][2] = C[2] ^ ( C[4] | ~C[3])
@@ -296,26 +303,25 @@ $code.=<<___;
xor $A[0][2](%rdi),@D[2]
xor $A[1][3](%rdi),@D[3]
rol \$$rhotates[0][2],@D[2]
xor $A[4][1](%rdi),@D[1]
rol \$$rhotates[1][3],@D[3]
xor $A[2][4](%rdi),@D[4]
rol \$$rhotates[4][1],@D[1]
xor $A[3][0](%rdi),@D[0]
xor $A[4][1](%rdi),@D[1]
xchg %rsi,%rdi
rol \$$rhotates[0][2],@D[2]
rol \$$rhotates[1][3],@D[3]
rol \$$rhotates[2][4],@D[4]
rol \$$rhotates[3][0],@D[0]
rol \$$rhotates[4][1],@D[1]
___
@C = (@D[2],@D[3],@D[4],@D[0],@D[1]);
$code.=<<___;
mov @C[0],@T[0]
and @C[1],@C[0]
not @C[1]
xor @C[4],@C[0] # C[4] ^ ( C[0] & C[1])
mov @C[0],$A[4][4](%rdi) # R[4][4] = C[4] ^ ( C[0] & C[1])
mov @C[2],@T[1]
not @C[1]
and @C[1],@C[2]
xor @T[0],@C[2] # C[0] ^ ( C[2] & ~C[1])
mov @C[2],$A[4][0](%rdi) # R[4][0] = C[0] ^ ( C[2] & ~C[1])
@@ -432,7 +438,7 @@ SHA3_absorb:
lea 8($A_flat),$A_flat
sub \$8,$len
mov %rax,-8($A_flat)
dec $bsz
sub \$1,$bsz
jnz .Lblock_absorb
mov $inp,200-100(%rsi) # save inp
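The change above in SHA3_absorb's block loop replaces `dec $bsz` with `sub \$1,$bsz`; the arithmetic is identical, but `sub` updates the full flags register and so avoids the partial-flags handling that `dec` can trigger on some older processors. As orientation only, a hedged C model of what the .Lblock_absorb loop does; the names and the exact handling of the tail are assumptions, the assembly above is authoritative:

#include <stdint.h>
#include <string.h>
#include <stddef.h>

extern void KeccakF1600(uint64_t A_flat[25]);     /* the permutation above */

/* Hedged C model of the absorb phase: XOR one 8-byte input lane into the
 * flat state per iteration; after bsz lanes a full r-bit block has been
 * absorbed and the state is permuted.  The real routine also saves inp
 * and hands the trailing partial block back to the caller.               */
static size_t absorb_blocks(uint64_t A_flat[25], const unsigned char *inp,
                            size_t len, size_t bsz)
{
    while (len >= 8 * bsz) {                      /* whole blocks only     */
        for (size_t i = 0; i < bsz; i++) {        /* "sub \$1,$bsz; jnz"   */
            uint64_t lane;
            memcpy(&lane, inp, 8);                /* unaligned 8-byte load */
            A_flat[i] ^= lane;                    /* xor lane into state   */
            inp += 8;
            len -= 8;                             /* "sub \$8,$len"        */
        }
        KeccakF1600(A_flat);
    }
    return len;                                   /* bytes left unabsorbed */
}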
@@ -497,7 +503,7 @@ SHA3_squeeze:
sub \$8,$len # len -= 8
jz .Ldone_squeeze
dec %rcx # bsz--
sub \$1,%rcx # bsz--
jnz .Loop_squeeze
call KeccakF1600
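SHA3_squeeze gets the same `dec` to `sub \$1` substitution for its lane counter. A hedged C model of the .Loop_squeeze structure, assuming the requested length is a positive multiple of 8 (the real code also emits a trailing partial lane):

#include <stdint.h>
#include <string.h>
#include <stddef.h>

extern void KeccakF1600(uint64_t A_flat[25]);     /* the permutation above */

/* Hedged C model of the squeeze phase: copy the state out eight bytes at
 * a time and permute again whenever a full bsz-lane block is exhausted
 * and more output is still required.                                      */
static void squeeze_blocks(uint64_t A_flat[25], unsigned char *out,
                           size_t len, size_t bsz)
{
    size_t i = 0;                          /* lane index within the block  */

    for (;;) {
        memcpy(out, &A_flat[i], 8);        /* emit one 64-bit lane         */
        out += 8;
        len -= 8;                          /* "sub \$8,$len"               */
        if (len == 0)                      /* "jz  .Ldone_squeeze"         */
            break;
        if (++i == bsz) {                  /* "sub \$1,%rcx; jnz ..."      */
            KeccakF1600(A_flat);           /* "call KeccakF1600"           */
            i = 0;
        }
    }
}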
@@ -552,6 +558,12 @@ iotas:
.asciz "Keccak-1600 absorb and squeeze for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
___
print $code;
foreach (split("\n",$code)) {
	# Below replacement results in 11.3 on Sandy Bridge, 9.4 on
	# Haswell, but it hurts other processors by up to 2-3-4x...
	#s/rol\s+(\$[0-9]+),(%[a-z][a-z0-9]+)/shld\t$1,$2,$2/;

	print $_, "\n";
}
close STDOUT;
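The commented-out substitution above rewrites every `rol $imm,%reg` as `shld $imm,%reg,%reg`, which is the workaround the (**) footnote alludes to: with the same register as both source and destination, a double-precision shift computes exactly the left rotation, and per the comments it helps Sandy Bridge while hurting other processors. A small C check of that equivalence (illustration only; the function names are assumptions, not part of the module):

#include <stdint.h>
#include <assert.h>

/* What "rol $n,%reg" computes. */
static uint64_t rol64(uint64_t x, unsigned n)
{
    return (x << n) | (x >> (64 - n));            /* assumes 0 < n < 64 */
}

/* What "shld $n,%src,%dst" computes: dst = (dst << n) | (src >> (64-n)).
 * With src == dst this collapses to the same left rotation.             */
static uint64_t shld64(uint64_t dst, uint64_t src, unsigned n)
{
    return (dst << n) | (src >> (64 - n));
}

int main(void)
{
    uint64_t x = 0x0123456789abcdefULL;
    for (unsigned n = 1; n < 64; n++)
        assert(rol64(x, n) == shld64(x, x, n));
    return 0;
}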