Commit 9515acca authored by Andy Polyakov

aes/asm/aesfx-sparcv9.pl: switch to fshiftorx to improve single-block and short-input performance.

[Fix bug in misaligned output handling.]
Reviewed-by: Richard Levitte <levitte@openssl.org>
Parent 8604a6e0
@@ -26,6 +26,12 @@
 # yet. CBC encrypt on the other hand is as good as it can possibly
 # get processing one byte in 4.1 cycles with 128-bit key on SPARC64 X.
 # This is ~6x faster than pure software implementation...
+#
+# July 2016
+#
+# Switch from faligndata to fshiftorx, which allows to omit alignaddr
+# instructions and improve single-block and short-input performance
+# with misaligned data.
 
 $output = pop;
 open STDOUT,">$output";
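Editor's note on the idiom being replaced: reading a 16-byte block that sits N bytes past an 8-byte boundary means loading the three covering doubles and merging each adjacent pair. The old code latched N into %gsr with alignaddr and let faligndata consume it from there; fshiftorx takes its shift parameters from an ordinary FP register instead (%f14 above, loaded once per call from the .Linp_align table added further down), which is what lets the alignaddr instructions go. A minimal Perl model of one merge, assuming the net effect is a plain shift-and-or; this sketches the effect, not the instruction's parameter encoding:

    # Model of the 8-byte merge done per register pair; needs a 64-bit perl.
    use strict;
    use warnings;

    sub merge64 {
        my ($hi, $lo, $align) = @_;        # adjacent doubles, $align in 0..7
        return $hi if $align == 0;         # aligned input needs no merge
        my $n = 8 * $align;
        return (($hi << $n) | ($lo >> (64 - $n))) & 0xffff_ffff_ffff_ffff;
    }

    # bytes 3..10 of the byte stream 00 01 02 ... 0f:
    printf "%016x\n", merge64(0x0001020304050607, 0x08090a0b0c0d0e0f, 3);
    # prints 030405060708090a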
@@ -45,18 +51,23 @@ $code.=<<___;
 aes_fx_encrypt:
 	and	$inp, 7, $tmp		! is input aligned?
 	andn	$inp, 7, $inp
-	ld	[$key + 240], $rounds
 	ldd	[$key + 0], %f6		! round[0]
 	ldd	[$key + 8], %f8
+	mov	%o7, %g1
+	ld	[$key + 240], $rounds
+
+1:	call	.+8
+	add	%o7, .Linp_align-1b, %o7
+	sll	$tmp, 3, $tmp
 
 	ldd	[$inp + 0], %f0		! load input
 	brz,pt	$tmp, .Lenc_inp_aligned
 	ldd	[$inp + 8], %f2
 
+	ldd	[%o7 + $tmp], %f14	! shift left params
 	ldd	[$inp + 16], %f4
-	alignaddr $inp, $tmp, %g0
-	faligndata %f0, %f2, %f0
-	faligndata %f2, %f4, %f2
+	fshiftorx %f0, %f2, %f14, %f0
+	fshiftorx %f2, %f4, %f14, %f2
 
 .Lenc_inp_aligned:
 	ldd	[$key + 16], %f10	! round[1]
@@ -87,17 +98,23 @@ aes_fx_encrypt:
 	sub	$rounds, 2, $rounds
 	andcc	$out, 7, $tmp		! is output aligned?
+	andn	$out, 7, $out
 	mov	0xff, $mask
+	srl	$mask, $tmp, $mask
+	add	%o7, 64, %o7
+	sll	$tmp, 3, $tmp
 
 	fmovd	%f0, %f4
 	faesencx %f2, %f10, %f0
 	faesencx %f4, %f12, %f2
+	ldd	[%o7 + $tmp], %f14	! shift right params
 
 	fmovd	%f0, %f4
 	faesenclx %f2, %f6, %f0
 	faesenclx %f4, %f8, %f2
 
-	bnz,a,pn %icc, .Lenc_out_unaligned
-	srl	$mask, $tmp, $mask
+	bnz,pn	%icc, .Lenc_out_unaligned
+	mov	%g1, %o7
 
 	std	%f0, [$out + 0]
 	retl
@@ -105,16 +122,15 @@ aes_fx_encrypt:
 .align	16
 .Lenc_out_unaligned:
-	alignaddrl $out, %g0, $out
-	faligndata %f0, %f0, %f4
-	faligndata %f0, %f2, %f6
-	faligndata %f2, %f2, %f8
+	add	$out, 16, $inp
+	orn	%g0, $mask, $tmp
+	fshiftorx %f0, %f0, %f14, %f4
+	fshiftorx %f0, %f2, %f14, %f6
+	fshiftorx %f2, %f2, %f14, %f8
 
 	stda	%f4, [$out + $mask]0xc0	! partial store
 	std	%f6, [$out + 8]
-	add	$out, 16, $out
-	orn	%g0, $mask, $mask
-	stda	%f8, [$out + $mask]0xc0	! partial store
+	stda	%f8, [$inp + $tmp]0xc0	! partial store
 	retl
 	nop
 .type	aes_fx_encrypt,#function
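Editor's note on the partial stores: the unaligned-output path never read-modify-writes the edges in software. Both edge doubles go out through stda with ASI 0xc0, whose register operand carries a byte mask: srl $mask, $tmp, $mask builds the leading-edge mask from out&7, and orn %g0, $mask, $tmp its complement for the trailing edge. A sketch for an output pointer 3 bytes past alignment, assuming mask bit 7 gates byte 0 (the big-endian convention):

    # Hypothetical illustration of the two edge masks.
    my $oalign = 3;
    my $head = 0xff >> $oalign;       # srl $mask, $tmp, $mask -> 0b00011111
    my $tail = ~$head & 0xff;         # orn %g0, $mask, $tmp   -> 0b11100000
    printf "head=%08b writes bytes %d..7\n", $head, $oalign;
    printf "tail=%08b writes bytes 0..%d\n", $tail, $oalign - 1;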
@@ -125,18 +141,23 @@ aes_fx_encrypt:
 aes_fx_decrypt:
 	and	$inp, 7, $tmp		! is input aligned?
 	andn	$inp, 7, $inp
-	ld	[$key + 240], $rounds
 	ldd	[$key + 0], %f6		! round[0]
 	ldd	[$key + 8], %f8
+	mov	%o7, %g1
+	ld	[$key + 240], $rounds
+
+1:	call	.+8
+	add	%o7, .Linp_align-1b, %o7
+	sll	$tmp, 3, $tmp
 
 	ldd	[$inp + 0], %f0		! load input
 	brz,pt	$tmp, .Ldec_inp_aligned
 	ldd	[$inp + 8], %f2
 
+	ldd	[%o7 + $tmp], %f14	! shift left params
 	ldd	[$inp + 16], %f4
-	alignaddr $inp, $tmp, $inp
-	faligndata %f0, %f2, %f0
-	faligndata %f2, %f4, %f2
+	fshiftorx %f0, %f2, %f14, %f0
+	fshiftorx %f2, %f4, %f14, %f2
 
 .Ldec_inp_aligned:
 	ldd	[$key + 16], %f10	! round[1]
@@ -167,17 +188,23 @@ aes_fx_decrypt:
 	sub	$rounds, 2, $rounds
 	andcc	$out, 7, $tmp		! is output aligned?
+	andn	$out, 7, $out
 	mov	0xff, $mask
+	srl	$mask, $tmp, $mask
+	add	%o7, 64, %o7
+	sll	$tmp, 3, $tmp
 
 	fmovd	%f0, %f4
 	faesdecx %f2, %f10, %f0
 	faesdecx %f4, %f12, %f2
+	ldd	[%o7 + $tmp], %f14	! shift right params
 
 	fmovd	%f0, %f4
 	faesdeclx %f2, %f6, %f0
 	faesdeclx %f4, %f8, %f2
 
-	bnz,a,pn %icc, .Ldec_out_unaligned
-	srl	$mask, $tmp, $mask
+	bnz,pn	%icc, .Ldec_out_unaligned
+	mov	%g1, %o7
 
 	std	%f0, [$out + 0]
 	retl
@@ -185,16 +212,15 @@ aes_fx_decrypt:
 .align	16
 .Ldec_out_unaligned:
-	alignaddrl $out, %g0, $out
-	faligndata %f0, %f0, %f4
-	faligndata %f0, %f2, %f6
-	faligndata %f2, %f2, %f8
+	add	$out, 16, $inp
+	orn	%g0, $mask, $tmp
+	fshiftorx %f0, %f0, %f14, %f4
+	fshiftorx %f0, %f2, %f14, %f6
+	fshiftorx %f2, %f2, %f14, %f8
 
 	stda	%f4, [$out + $mask]0xc0	! partial store
 	std	%f6, [$out + 8]
-	add	$out, 16, $out
-	orn	%g0, $mask, $mask
-	stda	%f8, [$out + $mask]0xc0	! partial store
+	stda	%f8, [$inp + $tmp]0xc0	! partial store
 	retl
 	nop
 .type	aes_fx_decrypt,#function
@@ -222,6 +248,14 @@ aes_fx_set_encrypt_key:
 .Lset_encrypt_key:
 	and	$inp, 7, $tmp
 	andn	$inp, 7, $inp
+	sll	$tmp, 3, $tmp
+	mov	%o7, %g1
+
+1:	call	.+8
+	add	%o7, .Linp_align-1b, %o7
+
+	ldd	[%o7 + $tmp], %f10	! shift left params
+	mov	%g1, %o7
 
 	cmp	$bits, 192
 	ldd	[$inp + 0], %f0
@@ -234,11 +268,10 @@ aes_fx_set_encrypt_key:
 	ldd	[$inp + 24], %f6
 	ldd	[$inp + 32], %f8
-	alignaddr $inp, $tmp, %g0
-	faligndata %f0, %f2, %f0
-	faligndata %f2, %f4, %f2
-	faligndata %f4, %f6, %f4
-	faligndata %f6, %f8, %f6
+	fshiftorx %f0, %f2, %f10, %f0
+	fshiftorx %f2, %f4, %f10, %f2
+	fshiftorx %f4, %f6, %f10, %f4
+	fshiftorx %f6, %f8, %f10, %f6
 
 .L256aligned:
 	mov	14, $bits
@@ -281,10 +314,9 @@ $code.=<<___;
 	nop
 	ldd	[$inp + 24], %f6
-	alignaddr $inp, $tmp, %g0
-	faligndata %f0, %f2, %f0
-	faligndata %f2, %f4, %f2
-	faligndata %f4, %f6, %f4
+	fshiftorx %f0, %f2, %f10, %f0
+	fshiftorx %f2, %f4, %f10, %f2
+	fshiftorx %f4, %f6, %f10, %f4
 
 .L192aligned:
 	mov	12, $bits
@@ -326,9 +358,8 @@ $code.=<<___;
 	nop
 	ldd	[$inp + 16], %f4
-	alignaddr $inp, $tmp, %g0
-	faligndata %f0, %f2, %f0
-	faligndata %f2, %f4, %f2
+	fshiftorx %f0, %f2, %f10, %f0
+	fshiftorx %f2, %f4, %f10, %f2
 
 .L128aligned:
 	mov	10, $bits
@@ -358,7 +389,7 @@ ___
 {
 my ($inp,$out,$len,$key,$ivp,$dir) = map("%i$_",(0..5));
 my ($rounds,$inner,$end,$inc,$ialign,$oalign,$mask) = map("%l$_",(0..7));
-my ($out0,$out1,$iv0,$iv1,$r0hi,$r0lo,$rlhi,$rllo,$in0,$in1,$intail,$outhead)
+my ($iv0,$iv1,$r0hi,$r0lo,$rlhi,$rllo,$in0,$in1,$intail,$outhead,$fshift)
 	= map("%f$_",grep { !($_ & 1) } (16 .. 62));
 my ($ileft,$iright) = ($ialign,$oalign);
@@ -368,14 +399,20 @@ $code.=<<___;
 aes_fx_cbc_encrypt:
 	save	%sp, -STACK_FRAME-16, %sp
 	srln	$len, 4, $len
-	brz,pn	$len, .Lcbc_no_data
 	and	$inp, 7, $ialign
 	andn	$inp, 7, $inp
+	brz,pn	$len, .Lcbc_no_data
+	sll	$ialign, 3, $ileft
+
+1:	call	.+8
+	add	%o7, .Linp_align-1b, %o7
 
 	ld	[$key + 240], $rounds
 	and	$out, 7, $oalign
 	ld	[$ivp + 0], %f0		! load ivec
+	andn	$out, 7, $out
 	ld	[$ivp + 4], %f1
+	sll	$oalign, 3, $mask
 	ld	[$ivp + 8], %f2
 	ld	[$ivp + 12], %f3
@@ -394,6 +431,8 @@ aes_fx_cbc_encrypt:
 	ldd	[$key + 16], %f10	! round[1]
 	ldd	[$key + 24], %f12
+	ldd	[%o7 + $ileft], $fshift	! shift left params
+	add	%o7, 64, %o7
 	ldd	[$inp - 16], $in0	! load input
 	ldd	[$inp - 8], $in1
 	ldda	[$inp]0x82, $intail	! non-faulting load
@@ -402,11 +441,9 @@ aes_fx_cbc_encrypt:
 	fxor	$r0hi, %f0, %f0		! ivec^=round[0]
 	fxor	$r0lo, %f2, %f2
-	alignaddr $inp, $ialign, %g0
-	faligndata $in0, $in1, $in0
-	faligndata $in1, $intail, $in1
-
-	fxor	$r0hi, $rlhi, $rlhi	! round[last]^=round[0]
-	fxor	$r0lo, $rllo, $rllo
+	fshiftorx $in0, $in1, $fshift, $in0
+	fshiftorx $in1, $intail, $fshift, $in1
+	nop
 
 .Loop_cbc_enc:
 	fxor	$in0, %f0, %f0		! inp^ivec^round[0]
@@ -439,44 +476,46 @@ aes_fx_cbc_encrypt:
 	ldd	[$end + 16], %f10	! round[last-1]
 	ldd	[$end + 24], %f12
 
-	fmovd	%f0, %f4
-	faesencx %f2, %f6, %f0
-	faesencx %f4, %f8, %f2
 	movrz	$len, 0, $inc
 	fmovd	$intail, $in0
 	ldd	[$inp - 8], $in1	! load next input block
 	ldda	[$inp]0x82, $intail	! non-faulting load
 	add	$inp, $inc, $inp	! inp+=16
 
+	fmovd	%f0, %f4
+	faesencx %f2, %f6, %f0
+	faesencx %f4, %f8, %f2
+	fshiftorx $in0, $in1, $fshift, $in0
+	fshiftorx $in1, $intail, $fshift, $in1
 
 	fmovd	%f0, %f4
 	faesencx %f2, %f10, %f0
 	faesencx %f4, %f12, %f2
 	ldd	[$key + 16], %f10	! round[1]
 	ldd	[$key + 24], %f12
 
-	faligndata $in0, $in1, $in0
-	faligndata $in1, $intail, $in1
+	fxor	$r0hi, $in0, $in0	! inp^=round[0]
+	fxor	$r0lo, $in1, $in1
 
 	fmovd	%f0, %f4
-	faesenclx %f2, $rlhi, %f0	! result is out^round[0]
+	faesenclx %f2, $rlhi, %f0
 	faesenclx %f4, $rllo, %f2
-	fxor	%f0, $r0hi, $out0	! out^round[0]^round[0]
 
 	brnz,pn	$oalign, .Lcbc_enc_unaligned_out
-	fxor	%f2, $r0lo, $out1
+	nop
 
-	std	$out0, [$out + 0]
-	std	$out1, [$out + 8]
+	std	%f0, [$out + 0]
+	std	%f2, [$out + 8]
 	add	$out, 16, $out
 
 	brnz,a	$len, .Loop_cbc_enc
 	sub	$len, 1, $len
 
-	st	$out0, [$ivp + 0]	! output ivec
-	st	$out0#lo, [$ivp + 4]
-	st	$out1, [$ivp + 8]
-	st	$out1#lo, [$ivp + 12]
+	st	%f0, [$ivp + 0]		! output ivec
+	st	%f1, [$ivp + 4]
+	st	%f2, [$ivp + 8]
+	st	%f3, [$ivp + 12]
 
 .Lcbc_no_data:
 	ret
@@ -484,22 +523,26 @@ aes_fx_cbc_encrypt:
 .align	32
 .Lcbc_enc_unaligned_out:
-	alignaddrl $out, %g0, $out
+	ldd	[%o7 + $mask], $fshift	! shift right params
 	mov	0xff, $mask
-	sll	$ialign, 3, $ileft
 	srl	$mask, $oalign, $mask
 	sub	%g0, $ileft, $iright
 
-	faligndata $out0, $out0, %f6
-	faligndata $out0, $out1, %f8
+	fshiftorx %f0, %f0, $fshift, %f6
+	fshiftorx %f0, %f2, $fshift, %f8
 
 	stda	%f6, [$out + $mask]0xc0	! partial store
+	orn	%g0, $mask, $mask
 	std	%f8, [$out + 8]
 	add	$out, 16, $out
 	brz	$len, .Lcbc_enc_unaligned_out_done
-	orn	%g0, $mask, $mask
+	sub	$len, 1, $len
+	b	.Loop_cbc_enc_unaligned_out
+	nop
+
+.align	32
 .Loop_cbc_enc_unaligned_out:
+	fmovd	%f2, $outhead
 	fxor	$in0, %f0, %f0		! inp^ivec^round[0]
 	fxor	$in1, %f2, %f2
 	ldd	[$key + 32], %f6	! round[2]
@@ -513,7 +556,7 @@ aes_fx_cbc_encrypt:
 	ldx	[$inp - 16], %o0
 	ldx	[$inp - 8], %o1
 
-	brz	$ialign, .Lcbc_enc_aligned_inp
+	brz	$ileft, .Lcbc_enc_aligned_inp
 	movrz	$len, 0, $inc
 
 	ldx	[$inp], %o2
@@ -536,6 +579,7 @@ aes_fx_cbc_encrypt:
 	stx	%o0, [%sp + LOCALS + 0]
 	stx	%o1, [%sp + LOCALS + 8]
 	add	$inp, $inc, $inp	! inp+=16
+	nop
 
 .Lcbc_enc_unaligned:
 	fmovd	%f0, %f4
@@ -563,6 +607,7 @@ aes_fx_cbc_encrypt:
 	fmovd	%f0, %f4
 	faesencx %f2, %f6, %f0
 	faesencx %f4, %f8, %f2
+
 	ldd	[%sp + LOCALS + 0], $in0
 	ldd	[%sp + LOCALS + 8], $in1
@@ -572,16 +617,15 @@ aes_fx_cbc_encrypt:
 	ldd	[$key + 16], %f10	! round[1]
 	ldd	[$key + 24], %f12
 
+	fxor	$r0hi, $in0, $in0	! inp^=round[0]
+	fxor	$r0lo, $in1, $in1
+
 	fmovd	%f0, %f4
-	faesenclx %f2, $rlhi, %f0	! result is out^round[0]
+	faesenclx %f2, $rlhi, %f0
 	faesenclx %f4, $rllo, %f2
 
-	fmovd	$out1, $outhead
-	fxor	%f0, $r0hi, $out0	! out^round[0]^round[0]
-	fxor	%f2, $r0lo, $out1
-	faligndata $outhead, $out0, %f6
-	faligndata $out0, $out1, %f8
+	fshiftorx $outhead, %f0, $fshift, %f6
+	fshiftorx %f0, %f2, $fshift, %f8
 
 	std	%f6, [$out + 0]
 	std	%f8, [$out + 8]
 	add	$out, 16, $out
@@ -590,22 +634,21 @@ aes_fx_cbc_encrypt:
 	sub	$len, 1, $len
 
 .Lcbc_enc_unaligned_out_done:
-	faligndata $out1, $out1, %f8
+	fshiftorx %f2, %f2, $fshift, %f8
 	stda	%f8, [$out + $mask]0xc0	! partial store
-	st	$out0, [$ivp + 0]	! output ivec
-	st	$out0#lo, [$ivp + 4]
-	st	$out1, [$ivp + 8]
-	st	$out1#lo, [$ivp + 12]
+	st	%f0, [$ivp + 0]		! output ivec
+	st	%f1, [$ivp + 4]
+	st	%f2, [$ivp + 8]
+	st	%f3, [$ivp + 12]
 
 	ret
 	restore
 
 .align	32
 .Lcbc_decrypt:
-	alignaddr $inp, $ialign, %g0
-	faligndata $in0, $in1, $in0
-	faligndata $in1, $intail, $in1
+	fshiftorx $in0, $in1, $fshift, $in0
+	fshiftorx $in1, $intail, $fshift, $in1
 	fmovd	%f0, $iv0
 	fmovd	%f2, $iv1
@@ -660,8 +703,8 @@ aes_fx_cbc_encrypt:
 	ldd	[$key + 16], %f10	! round[1]
 	ldd	[$key + 24], %f12
 
-	faligndata $in0, $in1, $in0
-	faligndata $in1, $intail, $in1
+	fshiftorx $in0, $in1, $fshift, $in0
+	fshiftorx $in1, $intail, $fshift, $in1
 
 	fmovd	%f0, %f4
 	faesdeclx %f2, %f6, %f0
@@ -687,21 +730,24 @@ aes_fx_cbc_encrypt:
 .align	32
 .Lcbc_dec_unaligned_out:
-	alignaddrl $out, %g0, $out
+	ldd	[%o7 + $mask], $fshift	! shift right params
 	mov	0xff, $mask
-	sll	$ialign, 3, $ileft
 	srl	$mask, $oalign, $mask
 	sub	%g0, $ileft, $iright
 
-	faligndata %f0, %f0, $out0
-	faligndata %f0, %f2, $out1
+	fshiftorx %f0, %f0, $fshift, %f6
+	fshiftorx %f0, %f2, $fshift, %f8
 
-	stda	$out0, [$out + $mask]0xc0	! partial store
-	std	$out1, [$out + 8]
+	stda	%f6, [$out + $mask]0xc0	! partial store
+	orn	%g0, $mask, $mask
+	std	%f8, [$out + 8]
 	add	$out, 16, $out
 	brz	$len, .Lcbc_dec_unaligned_out_done
-	orn	%g0, $mask, $mask
+	sub	$len, 1, $len
+	b	.Loop_cbc_dec_unaligned_out
+	nop
+
+.align	32
 .Loop_cbc_dec_unaligned_out:
 	fmovd	%f2, $outhead
 	fxor	$in0, $r0hi, %f0	! inp^round[0]
@@ -717,7 +763,7 @@ aes_fx_cbc_encrypt:
 	ldx	[$inp - 16], %o0
 	ldx	[$inp - 8], %o1
 
-	brz	$ialign, .Lcbc_dec_aligned_inp
+	brz	$ileft, .Lcbc_dec_aligned_inp
 	movrz	$len, 0, $inc
 
 	ldx	[$inp], %o2
@@ -740,6 +786,7 @@ aes_fx_cbc_encrypt:
 	stx	%o0, [%sp + LOCALS + 0]
 	stx	%o1, [%sp + LOCALS + 8]
 	add	$inp, $inc, $inp	! inp+=16
+	nop
 
 .Lcbc_dec_unaligned:
 	fmovd	%f0, %f4
@@ -767,10 +814,13 @@ aes_fx_cbc_encrypt:
 	fmovd	%f0, %f4
 	faesdecx %f2, %f6, %f0
 	faesdecx %f4, %f8, %f2
+
 	fxor	$iv0, $rlhi, %f6	! ivec^round[last]
 	fxor	$iv1, $rllo, %f8
 	fmovd	$in0, $iv0
 	fmovd	$in1, $iv1
+	ldd	[%sp + LOCALS + 0], $in0
+	ldd	[%sp + LOCALS + 8], $in1
 
 	fmovd	%f0, %f4
 	faesdecx %f2, %f10, %f0
@@ -781,20 +831,18 @@ aes_fx_cbc_encrypt:
 	fmovd	%f0, %f4
 	faesdeclx %f2, %f6, %f0
 	faesdeclx %f4, %f8, %f2
-	ldd	[%sp + LOCALS + 0], $in0
-	ldd	[%sp + LOCALS + 8], $in1
 
-	faligndata $outhead, %f0, $out0
-	faligndata %f0, %f2, $out1
-	std	$out0, [$out + 0]
-	std	$out1, [$out + 8]
+	fshiftorx $outhead, %f0, $fshift, %f6
+	fshiftorx %f0, %f2, $fshift, %f8
+	std	%f6, [$out + 0]
+	std	%f8, [$out + 8]
 	add	$out, 16, $out
 
 	brnz,a	$len, .Loop_cbc_dec_unaligned_out
 	sub	$len, 1, $len
 
 .Lcbc_dec_unaligned_out_done:
-	faligndata %f2, %f2, %f8
+	fshiftorx %f2, %f2, $fshift, %f8
 	stda	%f8, [$out + $mask]0xc0	! partial store
 
 	st	$iv0, [$ivp + 0]	! output ivec
@@ -811,7 +859,7 @@ ___
 {
 my ($inp,$out,$len,$key,$ivp) = map("%i$_",(0..5));
 my ($rounds,$inner,$end,$inc,$ialign,$oalign,$mask) = map("%l$_",(0..7));
-my ($out0,$out1,$ctr0,$ctr1,$r0hi,$r0lo,$rlhi,$rllo,$in0,$in1,$intail,$outhead)
+my ($ctr0,$ctr1,$r0hi,$r0lo,$rlhi,$rllo,$in0,$in1,$intail,$outhead,$fshift)
 	= map("%f$_",grep { !($_ & 1) } (16 .. 62));
 my ($ileft,$iright) = ($ialign, $oalign);
 my $one = "%f14";
@@ -822,22 +870,23 @@ $code.=<<___;
 aes_fx_ctr32_encrypt_blocks:
 	save	%sp, -STACK_FRAME-16, %sp
 	srln	$len, 0, $len
-	brz,pn	$len, .Lctr32_no_data
-	nop
 	and	$inp, 7, $ialign
 	andn	$inp, 7, $inp
+	brz,pn	$len, .Lctr32_no_data
+	sll	$ialign, 3, $ileft
 
 .Lpic:	call	.+8
-	add	%o7, .Lone - .Lpic, %o0
+	add	%o7, .Linp_align - .Lpic, %o7
 
 	ld	[$key + 240], $rounds
 	and	$out, 7, $oalign
 	ld	[$ivp + 0], $ctr0	! load counter
+	andn	$out, 7, $out
 	ld	[$ivp + 4], $ctr0#lo
+	sll	$oalign, 3, $mask
 	ld	[$ivp + 8], $ctr1
 	ld	[$ivp + 12], $ctr1#lo
-	ldd	[%o0], $one
+	ldd	[%o7 + 128], $one
 	sll	$rounds, 4, $rounds
 	add	$rounds, $key, $end
@@ -854,14 +903,15 @@ aes_fx_ctr32_encrypt_blocks:
 	ldd	[$end + 0], $rlhi	! round[last]
 	ldd	[$end + 8], $rllo
 
+	ldd	[%o7 + $ileft], $fshift	! shiftleft params
+	add	%o7, 64, %o7
 	ldd	[$inp - 16], $in0	! load input
 	ldd	[$inp - 8], $in1
 	ldda	[$inp]0x82, $intail	! non-faulting load
 	add	$inp, $inc, $inp	! inp+=16
 
-	alignaddr $inp, $ialign, %g0
-	faligndata $in0, $in1, $in0
-	faligndata $in1, $intail, $in1
+	fshiftorx $in0, $in1, $fshift, $in0
+	fshiftorx $in1, $intail, $fshift, $in1
 
 .Loop_ctr32:
 	fxor	$ctr0, $r0hi, %f0	! counter^round[0]
@@ -912,8 +962,8 @@ aes_fx_ctr32_encrypt_blocks:
 	ldd	[$key + 16], %f10	! round[1]
 	ldd	[$key + 24], %f12
 
-	faligndata $in0, $in1, $in0
-	faligndata $in1, $intail, $in1
+	fshiftorx $in0, $in1, $fshift, $in0
+	fshiftorx $in1, $intail, $fshift, $in1
 
 	fpadd32	$ctr1, $one, $ctr1	! increment counter
 	fmovd	%f0, %f4
@@ -936,21 +986,24 @@ aes_fx_ctr32_encrypt_blocks:
 .align	32
 .Lctr32_unaligned_out:
-	alignaddrl $out, %g0, $out
+	ldd	[%o7 + $mask], $fshift	! shift right params
 	mov	0xff, $mask
-	sll	$ialign, 3, $ileft
 	srl	$mask, $oalign, $mask
 	sub	%g0, $ileft, $iright
 
-	faligndata %f0, %f0, $out0
-	faligndata %f0, %f2, $out1
+	fshiftorx %f0, %f0, $fshift, %f6
+	fshiftorx %f0, %f2, $fshift, %f8
 
-	stda	$out0, [$out + $mask]0xc0	! partial store
-	std	$out1, [$out + 8]
+	stda	%f6, [$out + $mask]0xc0	! partial store
+	orn	%g0, $mask, $mask
+	std	%f8, [$out + 8]
 	add	$out, 16, $out
 	brz	$len, .Lctr32_unaligned_out_done
-	orn	%g0, $mask, $mask
+	sub	$len, 1, $len
+	b	.Loop_ctr32_unaligned_out
+	nop
+
+.align	32
 .Loop_ctr32_unaligned_out:
 	fmovd	%f2, $outhead
 	fxor	$ctr0, $r0hi, %f0	! counter^round[0]
@@ -966,7 +1019,7 @@ aes_fx_ctr32_encrypt_blocks:
 	ldx	[$inp - 16], %o0
 	ldx	[$inp - 8], %o1
 
-	brz	$ialign, .Lctr32_aligned_inp
+	brz	$ileft, .Lctr32_aligned_inp
 	movrz	$len, 0, $inc
 
 	ldx	[$inp], %o2
@@ -989,6 +1042,7 @@ aes_fx_ctr32_encrypt_blocks:
 	stx	%o0, [%sp + LOCALS + 0]
 	stx	%o1, [%sp + LOCALS + 8]
 	add	$inp, $inc, $inp	! inp+=16
+	nop
 
 .Lctr32_enc_unaligned:
 	fmovd	%f0, %f4
@@ -1032,24 +1086,43 @@ aes_fx_ctr32_encrypt_blocks:
 	faesenclx %f2, %f6, %f0
 	faesenclx %f4, %f8, %f2
 
-	faligndata $outhead, %f0, $out0
-	faligndata %f0, %f2, $out1
-	std	$out0, [$out + 0]
-	std	$out1, [$out + 8]
+	fshiftorx $outhead, %f0, $fshift, %f6
+	fshiftorx %f0, %f2, $fshift, %f8
+	std	%f6, [$out + 0]
+	std	%f8, [$out + 8]
 	add	$out, 16, $out
 
 	brnz,a	$len, .Loop_ctr32_unaligned_out
 	sub	$len, 1, $len
 
 .Lctr32_unaligned_out_done:
-	faligndata %f2, %f2, %f8
+	fshiftorx %f2, %f2, $fshift, %f8
 	stda	%f8, [$out + $mask]0xc0	! partial store
 
 	ret
 	restore
 .type	aes_fx_ctr32_encrypt_blocks,#function
 .size	aes_fx_ctr32_encrypt_blocks,.-aes_fx_ctr32_encrypt_blocks
 
 .align	32
+.Linp_align:		! fshiftorx parameters for left shift toward %rs1
+	.byte	0, 0, 64,  0,  0, 64,  0, -64
+	.byte	0, 0, 56,  8,  0, 56,  8, -56
+	.byte	0, 0, 48, 16,  0, 48, 16, -48
+	.byte	0, 0, 40, 24,  0, 40, 24, -40
+	.byte	0, 0, 32, 32,  0, 32, 32, -32
+	.byte	0, 0, 24, 40,  0, 24, 40, -24
+	.byte	0, 0, 16, 48,  0, 16, 48, -16
+	.byte	0, 0,  8, 56,  0,  8, 56,  -8
+.Lout_align:		! fshiftorx parameters for right shift toward %rs2
+	.byte	0, 0,  0, 64,  0,  0, 64,   0
+	.byte	0, 0,  8, 56,  0,  8, 56,  -8
+	.byte	0, 0, 16, 48,  0, 16, 48, -16
+	.byte	0, 0, 24, 40,  0, 24, 40, -24
+	.byte	0, 0, 32, 32,  0, 32, 32, -32
+	.byte	0, 0, 40, 24,  0, 40, 24, -40
+	.byte	0, 0, 48, 16,  0, 48, 16, -48
+	.byte	0, 0, 56,  8,  0, 56,  8, -56
 .Lone:
 	.word	0, 1
 .asciz	"AES for Fujitsu SPARC64 X, CRYPTOGAMS by <appro\@openssl.org>"
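Editor's note: the diff never spells out what each of the eight parameter bytes means to fshiftorx, but the rows are regular in the misalignment N: row N lives at offset 8*N (hence the sll by 3 before indexing), and .Lout_align sits 64 bytes past .Linp_align (hence the add %o7, 64, %o7 before loading shift-right params). A throwaway Perl generator that reproduces both tables, offered as an observation about the pattern rather than documentation of the instruction:

    # .Linp_align row N ~ shift a register pair left by 8*N bits,
    # .Lout_align row N ~ shift it right by 8*N bits.
    for my $n (0 .. 7) {
        my ($l, $r) = (64 - 8 * $n, 8 * $n);
        printf ".byte 0, 0, %3d, %3d, 0, %3d, %3d, %4d\t! .Linp_align row %d\n",
               $l, $r, $l, $r, -$l, $n;
    }
    for my $n (0 .. 7) {
        my ($l, $r) = (8 * $n, 64 - 8 * $n);
        printf ".byte 0, 0, %3d, %3d, 0, %3d, %3d, %4d\t! .Lout_align row %d\n",
               $l, $r, $l, $r, -$l, $n;
    }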
@@ -1148,13 +1221,42 @@ my %aesopf = ( "faesencx" => 0x90,
 	}
 }
 
+sub unfx3src {
+my ($mnemonic,$rs1,$rs2,$rs3,$rd)=@_;
+my ($ref,$opf);
+my %aesopf = ( "fshiftorx" => 0x0b );
+
+    $ref = "$mnemonic\t$rs1,$rs2,$rs3,$rd";
+
+    if (defined($opf=$aesopf{$mnemonic})) {
+	foreach ($rs1,$rs2,$rs3,$rd) {
+	    return $ref if (!/%f([0-9]{1,2})/);
+	    $_=$1;
+	    if ($1>=32) {
+		return $ref if ($1&1);
+		# re-encode for upper double register addressing
+		$_=($1|$1>>5)&31;
+	    }
+	}
+
+	return	sprintf ".word\t0x%08x !%s",
+			2<<30|$rd<<25|0x37<<19|$rs1<<14|$rs3<<9|$opf<<5|$rs2,
+			$ref;
+    } else {
+	return $ref;
+    }
+}
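Editor's note: unfx3src only fires when the assembler lacks the fshiftorx mnemonic, hand-encoding the instruction as a .word with op3=0x37. A quick sanity check of the encoding; the expected constant is my own arithmetic, so treat it as an assumption:

    # fshiftorx %f0, %f2, %f14, %f0: rd=0, rs1=0, rs2=2, rs3=14, opf=0x0b
    # 2<<30 | 0x37<<19 | 14<<9 | 0x0b<<5 | 2 == 0x81b81d62
    print unfx3src("fshiftorx", "%f0", "%f2", "%f14", "%f0"), "\n";
    # expect: .word	0x81b81d62 !fshiftorx	%f0,%f2,%f14,%f0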
 foreach (split("\n",$code)) {
     s/\`([^\`]*)\`/eval $1/ge;
 
     s/%f([0-9]+)#lo/sprintf "%%f%d",$1+1/ge;
 
     s/\b(faes[^x]{3,4}x)\s+(%f[0-9]{1,2}),\s*([%fx0-9]+),\s*(%f[0-9]{1,2})/
-		&unfx($1,$2,$3,$4,$5)
+		&unfx($1,$2,$3,$4)
+     /ge or
+    s/\b([f][^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
+		&unfx3src($1,$2,$3,$4,$5)
      /ge or
     s/\b([fb][^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
 		&unvis($1,$2,$3,$4)
      /ge or
...