#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# May 2011
#
# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
# in bn_gf2m.c. It's kind of low-hanging mechanical port from C for
# the time being... Except that it has two code paths: code suitable
# for any x86_64 CPU and PCLMULQDQ one suitable for Westmere and
# later. Improvement varies from one benchmark and µ-arch to another.
# Vanilla code path is at most 20% faster than compiler-generated code
# [not very impressive], while PCLMULQDQ - whole 85%-160% better on
# 163- and 571-bit ECDH benchmarks on Intel CPUs. Keep in mind that
# these coefficients are not ones for bn_GF2m_mul_2x2 itself, as not
# all CPU time is burnt in it...
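#
# For reference, the 64x64->128-bit carry-less product that _mul_1x1
# below computes corresponds to this portable sketch (illustrative only,
# not used by the module; assumes a 64-bit perl, where << wraps mod 2^64):
#
#	sub clmul_1x1_ref {
#		my ($a, $b) = @_;
#		my ($lo, $hi) = (0, 0);
#		for my $i (0 .. 63) {
#			next unless ($b >> $i) & 1;
#			$lo ^= $a << $i;		# low 64 bits of a<<i
#			$hi ^= $a >> (64 - $i) if $i;	# bits above 2^64
#		}
#		return ($hi, $lo);
#	}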

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

($lo,$hi)=("%rax","%rdx");	$a=$lo;
($i0,$i1)=("%rsi","%rdi");
($t0,$t1)=("%rbx","%rcx");
($b,$mask)=("%rbp","%r8");
($a1,$a2,$a4,$a8,$a12,$a48)=map("%r$_",(9..15));
($R,$Tx)=("%xmm0","%xmm1");

$code.=<<___;
.text

.type	_mul_1x1,\@abi-omnipotent
.align	16
_mul_1x1:
	sub	\$128+8,%rsp
	mov	\$-1,$a1
	lea	($a,$a),$i0
	shr	\$3,$a1
	lea	(,$a,4),$i1
	and	$a,$a1			# a1=a&0x1fffffffffffffff
	lea	(,$a,8),$a8
	sar	\$63,$a			# broadcast 63rd bit
	lea	($a1,$a1),$a2
	sar	\$63,$i0		# broadcast 62nd bit
	lea	(,$a1,4),$a4
	and	$b,$a
	sar	\$63,$i1		# broadcast 61st bit
	mov	$a,$hi			# $a is $lo
	shl	\$63,$lo
	and	$b,$i0
	shr	\$1,$hi
	mov	$i0,$t1
	shl	\$62,$i0
	and	$b,$i1
	shr	\$2,$t1
	xor	$i0,$lo
	mov	$i1,$t0
	shl	\$61,$i1
	xor	$t1,$hi
	shr	\$3,$t0
	xor	$i1,$lo
	xor	$t0,$hi

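	# The windowed lookup below covers only a&0x1fffffffffffffff: table
	# entries get shifted left by up to 3 bits, so contributions of bits
	# 61-63 of a would not fit in 64 bits. The code above folded those in
	# directly: each sar broadcasts one of the top three bits across a
	# register, the and masks b with it, and the masked copy is XOR-ed
	# into lo:hi pre-shifted by 63, 62 or 61 bits respectively.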
	mov	$a1,$a12
	movq	\$0,0(%rsp)		# tab[0]=0
	xor	$a2,$a12		# a1^a2
	mov	$a1,8(%rsp)		# tab[1]=a1
	 mov	$a4,$a48
	mov	$a2,16(%rsp)		# tab[2]=a2
	 xor	$a8,$a48		# a4^a8
	mov	$a12,24(%rsp)		# tab[3]=a1^a2

	xor	$a4,$a1
	mov	$a4,32(%rsp)		# tab[4]=a4
	xor	$a4,$a2
	mov	$a1,40(%rsp)		# tab[5]=a1^a4
	xor	$a4,$a12
	mov	$a2,48(%rsp)		# tab[6]=a2^a4
	 xor	$a48,$a1		# a1^a4^a4^a8=a1^a8
	mov	$a12,56(%rsp)		# tab[7]=a1^a2^a4
	 xor	$a48,$a2		# a2^a4^a4^a8=a2^a8

	mov	$a8,64(%rsp)		# tab[8]=a8
	xor	$a48,$a12		# a1^a2^a4^a4^a8=a1^a2^a8
	mov	$a1,72(%rsp)		# tab[9]=a1^a8
	 xor	$a4,$a1			# a1^a8^a4
	mov	$a2,80(%rsp)		# tab[10]=a2^a8
	 xor	$a4,$a2			# a2^a8^a4
	mov	$a12,88(%rsp)		# tab[11]=a1^a2^a8

	xor	$a4,$a12		# a1^a2^a8^a4
	mov	$a48,96(%rsp)		# tab[12]=a4^a8
	 mov	$mask,$i0
	mov	$a1,104(%rsp)		# tab[13]=a1^a4^a8
	 and	$b,$i0
	mov	$a2,112(%rsp)		# tab[14]=a2^a4^a8
	 shr	\$4,$b
	mov	$a12,120(%rsp)		# tab[15]=a1^a2^a4^a8
	 mov	$mask,$i1
	 and	$b,$i1
	 shr	\$4,$b

	movq	(%rsp,$i0,8),$R		# half of calculations is done in SSE2
	mov	$mask,$i0
	and	$b,$i0
	shr	\$4,$b
___
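# The remaining digits of b are consumed four bits at a time, alternating
# between two pipelines: odd-numbered digits go through the scalar lookup
# below, shifted into place across lo:hi by 8*n-4 bits, while even-numbered
# digits are looked up into an xmm register and byte-shifted with pslldq,
# to be folded into the scalar result at the end.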
    for ($n=1;$n<8;$n++) {
	$code.=<<___;
	mov	(%rsp,$i1,8),$t1
	mov	$mask,$i1
	mov	$t1,$t0
	shl	\$`8*$n-4`,$t1
	and	$b,$i1
	 movq	(%rsp,$i0,8),$Tx
	shr	\$`64-(8*$n-4)`,$t0
	xor	$t1,$lo
	 pslldq	\$$n,$Tx
	 mov	$mask,$i0
	shr	\$4,$b
	xor	$t0,$hi
	 and	$b,$i0
	 shr	\$4,$b
	 pxor	$Tx,$R
___
    }
$code.=<<___;
	mov	(%rsp,$i1,8),$t1
	mov	$t1,$t0
	shl	\$`8*$n-4`,$t1
	movq	$R,$i0
	shr	\$`64-(8*$n-4)`,$t0
	xor	$t1,$lo
	psrldq	\$8,$R
	xor	$t0,$hi
	movq	$R,$i1
	xor	$i0,$lo
	xor	$i1,$hi

	add	\$128+8,%rsp
	ret
.Lend_mul_1x1:
.size	_mul_1x1,.-_mul_1x1
___
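
# bn_GF2m_mul_2x2 assembles the 128x128->256-bit product from three 1x1
# multiplications, Karatsuba-style (in GF(2) addition and subtraction are
# both XOR):
#
#	(a1*x^64 + a0)*(b1*x^64 + b0) =
#		a1*b1*x^128
#	      + ((a1^a0)*(b1^b0) ^ a1*b1 ^ a0*b0)*x^64
#	      + a0*b0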

($rp,$a1,$a0,$b1,$b0) = $win64?	("%rcx","%rdx","%r8", "%r9","%r10") :	# Win64 order
				("%rdi","%rsi","%rdx","%rcx","%r8");	# Unix order

$code.=<<___;
.extern	OPENSSL_ia32cap_P
.globl	bn_GF2m_mul_2x2
.type	bn_GF2m_mul_2x2,\@abi-omnipotent
.align	16
bn_GF2m_mul_2x2:
	mov	OPENSSL_ia32cap_P(%rip),%rax
	bt	\$33,%rax
	jnc	.Lvanilla_mul_2x2

	movq		$a1,%xmm0
	movq		$b1,%xmm1
	movq		$a0,%xmm2
___
$code.=<<___ if ($win64);
	movq		40(%rsp),%xmm3
___
$code.=<<___ if (!$win64);
	movq		$b0,%xmm3
___
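# The PCLMULQDQ path runs the same Karatsuba schedule entirely in xmm
# registers: each 64x64 carry-less product takes a single pclmulqdq, and
# the middle term is then split with pslldq/psrldq and folded into the
# low and high 128-bit halves of the result.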
$code.=<<___;
	movdqa		%xmm0,%xmm4
	movdqa		%xmm1,%xmm5
	pclmulqdq	\$0,%xmm1,%xmm0	# a1b1
	pxor		%xmm2,%xmm4
	pxor		%xmm3,%xmm5
	pclmulqdq	\$0,%xmm3,%xmm2	# a0b0
	pclmulqdq	\$0,%xmm5,%xmm4	# (a0+a1)(b0+b1)
	xorps		%xmm0,%xmm4
	xorps		%xmm2,%xmm4	# (a0+a1)(b0+b1)-a0b0-a1b1
	movdqa		%xmm4,%xmm5
	pslldq		\$8,%xmm4
	psrldq		\$8,%xmm5
	pxor		%xmm4,%xmm2
	pxor		%xmm5,%xmm0
	movdqu		%xmm2,0($rp)
	movdqu		%xmm0,16($rp)
	ret

.align	16
.Lvanilla_mul_2x2:
	lea	-8*17(%rsp),%rsp
___
$code.=<<___ if ($win64);
	mov	`8*17+40`(%rsp),$b0
	mov	%rdi,8*15(%rsp)
	mov	%rsi,8*16(%rsp)
___
$code.=<<___;
	mov	%r14,8*10(%rsp)
	mov	%r13,8*11(%rsp)
	mov	%r12,8*12(%rsp)
	mov	%rbp,8*13(%rsp)
	mov	%rbx,8*14(%rsp)
.Lbody_mul_2x2:
	mov	$rp,32(%rsp)		# save the arguments
	mov	$a1,40(%rsp)
	mov	$a0,48(%rsp)
	mov	$b1,56(%rsp)
	mov	$b0,64(%rsp)

	mov	\$0xf,$mask
	mov	$a1,$a
	mov	$b1,$b
	call	_mul_1x1		# a1b1
	mov	$lo,16(%rsp)
	mov	$hi,24(%rsp)

	mov	48(%rsp),$a
	mov	64(%rsp),$b
	call	_mul_1x1		# a0b0
	mov	$lo,0(%rsp)
	mov	$hi,8(%rsp)

	mov	40(%rsp),$a
	mov	56(%rsp),$b
	xor	48(%rsp),$a
	xor	64(%rsp),$b
	call	_mul_1x1		# (a0+a1)(b0+b1)
___
	@r=("%rbx","%rcx","%rdi","%rsi");
$code.=<<___;
	mov	0(%rsp),@r[0]
	mov	8(%rsp),@r[1]
	mov	16(%rsp),@r[2]
	mov	24(%rsp),@r[3]
	mov	32(%rsp),%rbp

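	# Karatsuba combine: lo:hi hold (a0^a1)*(b0^b1), the slots loaded
	# above hold a0*b0 (words 0-1) and a1*b1 (words 2-3). The outer
	# words of the result are stored as is; the two middle words get
	# all three partial products XOR-ed in at bit offset 64.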
	xor	$hi,$lo
	xor	@r[1],$hi
	xor	@r[0],$lo
	mov	@r[0],0(%rbp)
	xor	@r[2],$hi
	mov	@r[3],24(%rbp)
	xor	@r[3],$lo
	xor	@r[3],$hi
	xor	$hi,$lo
	mov	$hi,16(%rbp)
	mov	$lo,8(%rbp)

	mov	8*10(%rsp),%r14
	mov	8*11(%rsp),%r13
	mov	8*12(%rsp),%r12
	mov	8*13(%rsp),%rbp
	mov	8*14(%rsp),%rbx
___
$code.=<<___ if ($win64);
	mov	8*15(%rsp),%rdi
	mov	8*16(%rsp),%rsi
___
$code.=<<___;
	lea	8*17(%rsp),%rsp
	ret
.Lend_mul_2x2:
.size	bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
.asciz	"GF(2^m) Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	16
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#               CONTEXT *context,DISPATCHER_CONTEXT *disp)
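# Once execution is past the .Lbody_mul_2x2 label all non-volatile
# registers have been saved, so the handler below restores them from the
# stack frame ("mimics the epilogue") before handing off to
# RtlVirtualUnwind to continue the unwind through the caller. _mul_1x1
# needs no handler of its own: .LSEH_info_1x1 describes its 128+8-byte
# stack allocation with plain unwind codes.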
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern __imp_RtlVirtualUnwind

.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	152($context),%rax	# pull context->Rsp
	mov	248($context),%rbx	# pull context->Rip

	lea	.Lbody_mul_2x2(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<"prologue" label
	jb	.Lin_prologue

	mov	8*10(%rax),%r14		# mimic epilogue
	mov	8*11(%rax),%r13
	mov	8*12(%rax),%r12
	mov	8*13(%rax),%rbp
	mov	8*14(%rax),%rbx
	mov	8*15(%rax),%rdi
	mov	8*16(%rax),%rsi

	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14

.Lin_prologue:
	lea	8*17(%rax),%rax
	mov	%rax,152($context)	# restore context->Rsp

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT) in qwords
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.section	.pdata
.align	4
	.rva	_mul_1x1
	.rva	.Lend_mul_1x1
	.rva	.LSEH_info_1x1

	.rva	.Lvanilla_mul_2x2
	.rva	.Lend_mul_2x2
	.rva	.LSEH_info_2x2
.section	.xdata
.align	8
.LSEH_info_1x1:
	.byte	0x01,0x07,0x02,0x00
	.byte	0x07,0x01,0x11,0x00	# sub rsp,128+8
.LSEH_info_2x2:
	.byte	9,0,0,0
	.rva	se_handler
___
}

$code =~ s/\`([^\`]*)\`/eval($1)/gem;
print $code;
close STDOUT;