diff --git a/Configure b/Configure
index f6c271fdbc248cfb806b4e310ea77e7e6adcc301..2333a63f1ebe1676932ab7b3ef2aeec9664220b8 100755
--- a/Configure
+++ b/Configure
@@ -130,7 +130,7 @@
 my $x86_elf_asm="$x86_asm:elf";
 my $x86_64_asm="x86_64cpuid.o:x86_64-gcc.o x86_64-mont.o x86_64-mont5.o x86_64-gf2m.o modexp512-x86_64.o::aes-x86_64.o vpaes-x86_64.o bsaes-x86_64.o aesni-x86_64.o aesni-sha1-x86_64.o::md5-x86_64.o:sha1-x86_64.o sha256-x86_64.o sha512-x86_64.o::rc4-x86_64.o rc4-md5-x86_64.o:::wp-x86_64.o:cmll-x86_64.o cmll_misc.o:ghash-x86_64.o:e_padlock-x86_64.o";
 my $ia64_asm="ia64cpuid.o:bn-ia64.o ia64-mont.o::aes_core.o aes_cbc.o aes-ia64.o::md5-ia64.o:sha1-ia64.o sha256-ia64.o sha512-ia64.o::rc4-ia64.o rc4_skey.o:::::ghash-ia64.o::void";
-my $sparcv9_asm="sparcv9cap.o sparccpuid.o:bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o:des_enc-sparc.o fcrypt_b.o:aes_core.o aes_cbc.o aes-sparcv9.o:::sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o:::::::ghash-sparcv9.o::void";
+my $sparcv9_asm="sparcv9cap.o sparccpuid.o:bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o:des_enc-sparc.o fcrypt_b.o:aes_core.o aes_cbc.o aes-sparcv9.o::md5-sparcv9.o:sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o:::::::ghash-sparcv9.o::void";
 my $sparcv8_asm=":sparcv8.o:des_enc-sparc.o fcrypt_b.o:::::::::::::void";
 my $alpha_asm="alphacpuid.o:bn_asm.o alpha-mont.o:::::sha1-alpha.o:::::::ghash-alpha.o::void";
 my $mips64_asm=":bn-mips.o mips-mont.o::aes_cbc.o aes-mips.o:::sha1-mips.o sha256-mips.o sha512-mips.o::::::::";
diff --git a/TABLE b/TABLE
index 2d29d84fd6320ccb56ad2ba2142b37522425a6e7..397f83ed4513771514cc50f989c3a6f58670b028 100644
--- a/TABLE
+++ b/TABLE
@@ -178,7 +178,7 @@ $bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o
 $des_obj = des_enc-sparc.o fcrypt_b.o
 $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o
 $bf_obj =
-$md5_obj =
+$md5_obj = md5-sparcv9.o
 $sha1_obj = sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o
 $cast_obj =
 $rc4_obj =
@@ -2620,7 +2620,7 @@ $bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o
 $des_obj = des_enc-sparc.o fcrypt_b.o
 $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o
 $bf_obj =
-$md5_obj =
+$md5_obj = md5-sparcv9.o
 $sha1_obj = sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o
 $cast_obj =
 $rc4_obj =
@@ -2653,7 +2653,7 @@ $bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o
 $des_obj = des_enc-sparc.o fcrypt_b.o
 $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o
 $bf_obj =
-$md5_obj =
+$md5_obj = md5-sparcv9.o
 $sha1_obj = sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o
 $cast_obj =
 $rc4_obj =
@@ -4402,7 +4402,7 @@ $bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o
 $des_obj = des_enc-sparc.o fcrypt_b.o
 $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o
 $bf_obj =
-$md5_obj =
+$md5_obj = md5-sparcv9.o
 $sha1_obj = sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o
 $cast_obj =
 $rc4_obj =
@@ -4600,7 +4600,7 @@ $bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o
 $des_obj = des_enc-sparc.o fcrypt_b.o
 $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o
 $bf_obj =
-$md5_obj =
+$md5_obj = md5-sparcv9.o
 $sha1_obj = sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o
 $cast_obj =
 $rc4_obj =
@@ -5458,7 +5458,7 @@ $bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o
 $des_obj = des_enc-sparc.o fcrypt_b.o
 $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o
 $bf_obj =
-$md5_obj =
+$md5_obj = md5-sparcv9.o
 $sha1_obj = sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o
 $cast_obj =
 $rc4_obj =
@@ -5491,7 +5491,7 @@ $bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o
 $des_obj = des_enc-sparc.o fcrypt_b.o
 $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o
 $bf_obj =
-$md5_obj =
+$md5_obj = md5-sparcv9.o
 $sha1_obj = sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o
 $cast_obj =
 $rc4_obj =
@@ -5590,7 +5590,7 @@ $bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o
 $des_obj = des_enc-sparc.o fcrypt_b.o
 $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o
 $bf_obj =
-$md5_obj =
+$md5_obj = md5-sparcv9.o
 $sha1_obj = sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o
 $cast_obj =
 $rc4_obj =
@@ -5623,7 +5623,7 @@ $bn_obj = bn-sparcv9.o sparcv9-mont.o sparcv9a-mont.o
 $des_obj = des_enc-sparc.o fcrypt_b.o
 $aes_obj = aes_core.o aes_cbc.o aes-sparcv9.o
 $bf_obj =
-$md5_obj =
+$md5_obj = md5-sparcv9.o
 $sha1_obj = sha1-sparcv9.o sha256-sparcv9.o sha512-sparcv9.o
 $cast_obj =
 $rc4_obj =
diff --git a/crypto/md5/Makefile b/crypto/md5/Makefile
index 7d42da4fd108feafe94cee2d55f05d99bb93432a..5b289213f593e2a4a856ca5296484bead1def264 100644
--- a/crypto/md5/Makefile
+++ b/crypto/md5/Makefile
@@ -52,6 +52,9 @@ md5-ia64.s: asm/md5-ia64.S
 	$(CC) $(CFLAGS) -E asm/md5-ia64.S | \
 	$(PERL) -ne 's/;\s+/;\n/g; print;' > $@
 
+md5-sparcv9.S: asm/md5-sparcv9.pl
+	$(PERL) asm/md5-sparcv9.pl $@ $(CFLAGS)
+
 files:
 	$(PERL) $(TOP)/util/files.pl Makefile >> $(TOP)/MINFO
 
diff --git a/crypto/md5/asm/md5-sparcv9.pl b/crypto/md5/asm/md5-sparcv9.pl
new file mode 100644
index 0000000000000000000000000000000000000000..c9ce0b94ada7b39971157bc286a5987bd695d110
--- /dev/null
+++ b/crypto/md5/asm/md5-sparcv9.pl
@@ -0,0 +1,284 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# MD5 for SPARCv9, 7.5 cycles per byte on UltraSPARC, >40% faster than
+# code generated by Sun C 5.2.
+
+$bits=32;
+for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
+if ($bits==64) { $bias=2047; $frame=192; }
+else { $bias=0; $frame=112; }
+
+$output=shift;
+open STDOUT,">$output";
+
+use integer;
+
+($ctx,$inp,$len)=("%i0","%i1","%i2");	# input arguments
+
+# 64-bit values
+@X=("%o0","%o1","%o2","%o3","%o4","%o5","%o7","%g1","%g2");
+$tx="%g3";
+($AB,$CD)=("%g4","%g5");
+
+# 32-bit values
+@V=($A,$B,$C,$D)=map("%l$_",(0..3));
+($t1,$t2,$t3,$saved_asi)=map("%l$_",(4..7));
+($shr,$shl1,$shl2)=("%i3","%i4","%i5");
+
+my @K=( 0xd76aa478,0xe8c7b756,0x242070db,0xc1bdceee,
+	0xf57c0faf,0x4787c62a,0xa8304613,0xfd469501,
+	0x698098d8,0x8b44f7af,0xffff5bb1,0x895cd7be,
+	0x6b901122,0xfd987193,0xa679438e,0x49b40821,
+
+	0xf61e2562,0xc040b340,0x265e5a51,0xe9b6c7aa,
+	0xd62f105d,0x02441453,0xd8a1e681,0xe7d3fbc8,
+	0x21e1cde6,0xc33707d6,0xf4d50d87,0x455a14ed,
+	0xa9e3e905,0xfcefa3f8,0x676f02d9,0x8d2a4c8a,
+
+	0xfffa3942,0x8771f681,0x6d9d6122,0xfde5380c,
+	0xa4beea44,0x4bdecfa9,0xf6bb4b60,0xbebfbc70,
+	0x289b7ec6,0xeaa127fa,0xd4ef3085,0x04881d05,
+	0xd9d4d039,0xe6db99e5,0x1fa27cf8,0xc4ac5665,
+
+	0xf4292244,0x432aff97,0xab9423a7,0xfc93a039,
+	0x655b59c3,0x8f0ccc92,0xffeff47d,0x85845dd1,
+	0x6fa87e4f,0xfe2ce6e0,0xa3014314,0x4e0811a1,
+	0xf7537e82,0xbd3af235,0x2ad7d2bb,0xeb86d391, 0 );
+
+sub R0 {
+  my ($i,$a,$b,$c,$d) = @_;
+  my $rot = (7,12,17,22)[$i%4];
+  my $j   = ($i+1)/2;
+
+  if ($i&1) {
+    $code.=<<___;
+	srlx	@X[$j],$shr,@X[$j]	! align X[`$i+1`]
+	and	$b,$t1,$t1		! round $i
+	sllx	@X[$j+1],$shl1,$tx
+	add	$t2,$a,$a
+	sllx	$tx,$shl2,$tx
+	xor	$d,$t1,$t1
+	or	$tx,@X[$j],@X[$j]
+	sethi	%hi(@K[$i+1]),$t2
+	add	$t1,$a,$a
+	or	$t2,%lo(@K[$i+1]),$t2
+	sll	$a,$rot,$t3
+	add	@X[$j],$t2,$t2		! X[`$i+1`]+K[`$i+1`]
+	srl	$a,32-$rot,$a
+	add	$b,$t3,$t3
+	xor	$b,$c,$t1
+	add	$t3,$a,$a
+___
+  } else {
+    $code.=<<___;
+	srlx	@X[$j],32,$tx		! extract X[`2*$j+1`]
+	and	$b,$t1,$t1		! round $i
+	add	$t2,$a,$a
+	xor	$d,$t1,$t1
+	sethi	%hi(@K[$i+1]),$t2
+	add	$t1,$a,$a
+	or	$t2,%lo(@K[$i+1]),$t2
+	sll	$a,$rot,$t3
+	add	$tx,$t2,$t2		! X[`2*$j+1`]+K[`$i+1`]
+	srl	$a,32-$rot,$a
+	add	$b,$t3,$t3
+	xor	$b,$c,$t1
+	add	$t3,$a,$a
+___
+  }
+}
+
+sub R0_1 {
+  my ($i,$a,$b,$c,$d) = @_;
+  my $rot = (7,12,17,22)[$i%4];
+
+$code.=<<___;
+	srlx	@X[0],32,$tx		! extract X[1]
+	and	$b,$t1,$t1		! round $i
+	add	$t2,$a,$a
+	xor	$d,$t1,$t1
+	sethi	%hi(@K[$i+1]),$t2
+	add	$t1,$a,$a
+	or	$t2,%lo(@K[$i+1]),$t2
+	sll	$a,$rot,$t3
+	add	$tx,$t2,$t2		! X[1]+K[`$i+1`]
+	srl	$a,32-$rot,$a
+	add	$b,$t3,$t3
+	andn	$b,$c,$t1
+	add	$t3,$a,$a
+___
+}
+
+sub R1 {
+  my ($i,$a,$b,$c,$d) = @_;
+  my $rot = (5,9,14,20)[$i%4];
+  my $j   = $i<31 ? (1+5*($i+1))%16 : (5+3*($i+1))%16;
+  my $xi  = @X[$j/2];
+
+$code.=<<___ if ($j&1 && ($xi=$tx));
+	srlx	@X[$j/2],32,$xi		! extract X[$j]
+___
+$code.=<<___;
+	and	$b,$d,$t3		! round $i
+	add	$t2,$a,$a
+	or	$t3,$t1,$t1
+	sethi	%hi(@K[$i+1]),$t2
+	add	$t1,$a,$a
+	or	$t2,%lo(@K[$i+1]),$t2
+	sll	$a,$rot,$t3
+	add	$xi,$t2,$t2		! X[$j]+K[`$i+1`]
+	srl	$a,32-$rot,$a
+	add	$b,$t3,$t3
+	`$i<31?"andn":"xor"`	$b,$c,$t1
+	add	$t3,$a,$a
+___
+}
+
+sub R2 {
+  my ($i,$a,$b,$c,$d) = @_;
+  my $rot = (4,11,16,23)[$i%4];
+  my $j   = $i<47 ? (5+3*($i+1))%16 : (0+7*($i+1))%16;
+  my $xi  = @X[$j/2];
+
+$code.=<<___ if ($j&1 && ($xi=$tx));
+	srlx	@X[$j/2],32,$xi		! extract X[$j]
+___
+$code.=<<___;
+	add	$t2,$a,$a		! round $i
+	xor	$b,$t1,$t1
+	sethi	%hi(@K[$i+1]),$t2
+	add	$t1,$a,$a
+	or	$t2,%lo(@K[$i+1]),$t2
+	sll	$a,$rot,$t3
+	add	$xi,$t2,$t2		! X[$j]+K[`$i+1`]
+	srl	$a,32-$rot,$a
+	add	$b,$t3,$t3
+	xor	$b,$c,$t1
+	add	$t3,$a,$a
+___
+}
+
+sub R3 {
+  my ($i,$a,$b,$c,$d) = @_;
+  my $rot = (6,10,15,21)[$i%4];
+  my $j   = (0+7*($i+1))%16;
+  my $xi  = @X[$j/2];
+
+$code.=<<___;
+	add	$t2,$a,$a		! round $i
+___
+$code.=<<___ if ($j&1 && ($xi=$tx));
+	srlx	@X[$j/2],32,$xi		! extract X[$j]
+___
+$code.=<<___;
+	orn	$b,$d,$t1
+	sethi	%hi(@K[$i+1]),$t2
+	xor	$c,$t1,$t1
+	or	$t2,%lo(@K[$i+1]),$t2
+	add	$t1,$a,$a
+	sll	$a,$rot,$t3
+	add	$xi,$t2,$t2		! X[$j]+K[`$i+1`]
+	srl	$a,32-$rot,$a
+	add	$b,$t3,$t3
+	add	$t3,$a,$a
+___
+}
+
+$code.=<<___ if ($bits==64);
+.register	%g2,#scratch
+.register	%g3,#scratch
+___
+$code.=<<___;
+.section	".text",#alloc,#execinstr
+
+.globl	md5_block_asm_data_order
+.align	32
+md5_block_asm_data_order:
+	save	%sp,-$frame,%sp
+
+	rd	%asi,$saved_asi
+	wr	%g0,0x88,%asi		! ASI_PRIMARY_LITTLE
+	and	$inp,7,$shr
+	andn	$inp,7,$inp
+
+	sll	$shr,3,$shr		! *=8
+	mov	56,$shl2
+	ld	[$ctx+0],$A
+	sub	$shl2,$shr,$shl2
+	ld	[$ctx+4],$B
+	and	$shl2,32,$shl1
+	add	$shl2,8,$shl2
+	ld	[$ctx+8],$C
+	sub	$shl2,$shl1,$shl2	! shr+shl1+shl2==64
+	ld	[$ctx+12],$D
+	nop
+
+.Loop:
+	cmp	$shr,0			! was inp aligned?
+	ldxa	[$inp+0]%asi,@X[0]	! load little-endian input
+	ldxa	[$inp+8]%asi,@X[1]
+	ldxa	[$inp+16]%asi,@X[2]
+	ldxa	[$inp+24]%asi,@X[3]
+	ldxa	[$inp+32]%asi,@X[4]
+	sllx	$A,32,$AB		! pack A,B
+	ldxa	[$inp+40]%asi,@X[5]
+	sllx	$C,32,$CD		! pack C,D
+	ldxa	[$inp+48]%asi,@X[6]
+	or	$B,$AB,$AB
+	ldxa	[$inp+56]%asi,@X[7]
+	or	$D,$CD,$CD
+	bnz,a,pn	%icc,.+8
+	ldxa	[$inp+64]%asi,@X[8]
+
+	srlx	@X[0],$shr,@X[0]	! align X[0]
+	sllx	@X[1],$shl1,$tx
+	sethi	%hi(@K[0]),$t2
+	sllx	$tx,$shl2,$tx
+	or	$t2,%lo(@K[0]),$t2
+	or	$tx,@X[0],@X[0]
+	xor	$C,$D,$t1
+	add	@X[0],$t2,$t2		! X[0]+K[0]
+___
+	for ($i=0;$i<15;$i++) { &R0($i,@V);	unshift(@V,pop(@V)); }
+	for (;$i<16;$i++)     { &R0_1($i,@V);	unshift(@V,pop(@V)); }
+	for (;$i<32;$i++)     { &R1($i,@V);	unshift(@V,pop(@V)); }
+	for (;$i<48;$i++)     { &R2($i,@V);	unshift(@V,pop(@V)); }
+	for (;$i<64;$i++)     { &R3($i,@V);	unshift(@V,pop(@V)); }
+$code.=<<___;
+	srlx	$AB,32,$t1		! unpack A,B,C,D and accumulate
+	add	$inp,64,$inp		! advance inp
+	srlx	$CD,32,$t2
+	add	$t1,$A,$A
+	subcc	$len,1,$len		! done yet?
+	add	$AB,$B,$B
+	add	$t2,$C,$C
+	add	$CD,$D,$D
+	srl	$B,0,$B			! clruw $B
+	bne	`$bits==64?"%xcc":"%icc"`,.Loop
+	srl	$D,0,$D			! clruw $D
+
+	st	$A,[$ctx+0]		! write out ctx
+	st	$B,[$ctx+4]
+	st	$C,[$ctx+8]
+	st	$D,[$ctx+12]
+
+	wr	%g0,$saved_asi,%asi
+	ret
+	restore
+.type	md5_block_asm_data_order,#function
+.size	md5_block_asm_data_order,(.-md5_block_asm_data_order)
+
+.asciz	"MD5 block transform for SPARCv9, CRYPTOGAMS by "
+.align	4
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/crypto/md5/md5_locl.h b/crypto/md5/md5_locl.h
index 968d577995aa2b1be62ba9fa54d12a26981f0078..37e43fe4280d952414c8ddaedfb884db85b331bb 100644
--- a/crypto/md5/md5_locl.h
+++ b/crypto/md5/md5_locl.h
@@ -71,6 +71,8 @@
 #  define md5_block_data_order md5_block_asm_data_order
 # elif defined(__ia64) || defined(__ia64__) || defined(_M_IA64)
 #  define md5_block_data_order md5_block_asm_data_order
+# elif defined(__sparc) || defined(__sparc__)
+#  define md5_block_data_order md5_block_asm_data_order
 # endif
 #endif
 
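
For readers cross-checking the generator: the R0..R3 subroutines in md5-sparcv9.pl emit, interleaved with the sethi/or construction of the next round constant, the standard MD5 step from RFC 1321, a = b + rotl32(a + f(b,c,d) + X[k] + K[i], s), where f is F, G, H or I depending on the round group. The plain-Perl sketch below shows that reference step; it is illustrative only and not part of the patch, and the helper names (rotl32, md5_f..md5_i, round_step) are invented for this example.

#!/usr/bin/env perl
# Reference MD5 step: a = b + rotl32(a + f(b,c,d) + X[k] + K[i], s).
# Illustrative sketch only; not part of the md5-sparcv9.pl generator.
use strict;
use warnings;

sub rotl32 {
	my ($x, $n) = @_;
	return (($x << $n) | ($x >> (32 - $n))) & 0xffffffff;
}

# Boolean mixers from RFC 1321: R0 emits F, R1 emits G, R2 emits H, R3 emits I.
sub md5_f { my ($b, $c, $d) = @_; return (($b & $c) | (~$b & $d)) & 0xffffffff; }
sub md5_g { my ($b, $c, $d) = @_; return (($b & $d) | ($c & ~$d)) & 0xffffffff; }
sub md5_h { my ($b, $c, $d) = @_; return ($b ^ $c ^ $d) & 0xffffffff; }
sub md5_i { my ($b, $c, $d) = @_; return ($c ^ ($b | ~$d)) & 0xffffffff; }

sub round_step {
	my ($f, $a, $b, $c, $d, $xk, $ki, $s) = @_;
	my $t = ($a + $f->($b, $c, $d) + $xk + $ki) & 0xffffffff;
	return ($b + rotl32($t, $s)) & 0xffffffff;
}

# One F-round step from the standard initial state with X[0]=0, K[0]=0xd76aa478
# and rotation 7 -- the same dataflow the first R0 step encodes.
my ($A, $B, $C, $D) = (0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476);
printf "new A = %08x\n", round_step(\&md5_f, $A, $B, $C, $D, 0, 0xd76aa478, 7);

Note that the assembly computes F as ((c ^ d) & b) ^ d (the xor/and/xor sequence feeding $t1), a branch-free equivalent of the (b & c) | (~b & d) form used in the sketch.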