/*
 * Cast6 Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/linkage.h>
#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

.file "cast6-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
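/*
 * km/kr offsets assume the cast6_ctx layout used by the generic CAST6 code:
 * 48 x 32-bit masking keys (12 rounds x 4) followed by 48 x 8-bit rotation
 * keys.
 */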
#define km	0
#define kr	(12*4*4)

/* s-boxes */
#define s1	cast_s1
#define s2	cast_s2
#define s3	cast_s3
#define s4	cast_s4

/**********************************************************************
  8-way AVX cast6
 **********************************************************************/
#define CTX %rdi

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX  %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12
#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d


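/*
 * lookup_32bit: the four CAST s-box lookups for one 32-bit sub-word held in
 * a general purpose register; op1/op2/op3 select how the s-box outputs are
 * combined, and interleave_op lets the caller slot in the shift that exposes
 * the next 32-bit sub-word.
 */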
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	shrq $16,	src;                     \
	movl		s1(, RID1, 4), dst ## d; \
	op1		s2(, RID2, 4), dst ## d; \
	movzbl		src ## bh,     RID1d;    \
	movzbl		src ## bl,     RID2d;    \
	interleave_op(il_reg);			 \
	op2		s3(, RID1, 4), dst ## d; \
	op3		s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
	shrq $16,	reg;

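/*
 * F_head/F_tail together evaluate one CAST-256 f-function on all four 32-bit
 * sub-words of an xmm register: F_head applies the masking key (op0 with RKM)
 * and the key-dependent rotation (RKRF/RKRR), then moves the result into two
 * general purpose registers; F_tail performs the s-box lookups and
 * reassembles the four 32-bit results back into an xmm register.
 */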
#define F_head(a, x, gi1, gi2, op0) \
	op0	a,	RKM,  x;                 \
	vpslld	RKRF,	x,    RTMP;              \
	vpsrld	RKRR,	x,    x;                 \
	vpor	RTMP,	x,    x;                 \
	\
	vmovq		x,    gi1;               \
	vpextrq $1,	x,    gi2;

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
	lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
	lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
	\
	lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS2;                                      \
	orq		RFS1, RFS2;                                \
	lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none);     \
	shlq $32,	RFS1;                                      \
	orq		RFS1, RFS3;                                \
	\
	vmovq		RFS2, x;                                   \
	vpinsrq $1,	RFS3, x, x;

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
	F_head(b1, RX, RGI1, RGI2, op0);              \
	F_head(b2, RX, RGI3, RGI4, op0);              \
	\
	F_tail(b1, RX, RGI1, RGI2, op1, op2, op3);    \
	F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3);  \
	\
	vpxor		a1, RX,   a1;                 \
	vpxor		a2, RTMP, a2;

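/*
 * The three CAST-256 round function types (RFC 2612): f1 masks with add and
 * combines the s-box outputs with xor/sub/add, f2 masks with xor
 * (sub/add/xor), f3 masks with sub (add/xor/sub).  The _2 variants process
 * both four-block groups at once.
 */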
#define F1_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
	F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

#define qop(in, out, f) \
	F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2);

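/*
 * get_round_keys: broadcast the 32-bit masking key Km for sub-round nn and
 * split the lowest remaining rotation byte of RKR into the left shift count
 * (RKRF) and its complement (RKRR = 32 - RKRF); shifting RKR down by one
 * byte exposes the rotation key of the next sub-round.
 */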
#define get_round_keys(nn) \
	vbroadcastss	(km+(4*(nn)))(CTX), RKM;        \
	vpand		R1ST,               RKR,  RKRF; \
	vpsubq		RKRF,               R32,  RKRR; \
	vpsrldq $1,	RKR,                RKR;

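/*
 * Q: forward quad-round of CAST-256 (RFC 2612):
 *	C ^= f1(D);  B ^= f2(C);  A ^= f3(B);  D ^= f1(A);
 * each sub-round using its own Km/Kr pair.
 */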
#define Q(n) \
	get_round_keys(4*n+0); \
	qop(RD, RC, 1);        \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2);        \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3);        \
	\
	get_round_keys(4*n+3); \
	qop(RA, RD, 1);

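/* QBAR: reverse quad-round; the same four sub-rounds applied in reverse order */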
#define QBAR(n) \
	get_round_keys(4*n+3); \
	qop(RA, RD, 1);        \
	\
	get_round_keys(4*n+2); \
	qop(RB, RA, 3);        \
	\
	get_round_keys(4*n+1); \
	qop(RC, RB, 2);        \
	\
	get_round_keys(4*n+0); \
	qop(RD, RC, 1);

#define shuffle(mask) \
	vpshufb		mask,            RKR, RKR;

#define preload_rkr(n, do_mask, mask) \
	vbroadcastss	.L16_mask,                RKR;      \
	/* add 16-bit rotation to key rotations (mod 32) */ \
	vpxor		(kr+n*16)(CTX),           RKR, RKR; \
	do_mask(mask);

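/* transpose_4x4: 4x4 transpose of 32-bit words across four xmm registers */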
#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	vpunpckldq		x1, x0, t0; \
	vpunpckhdq		x1, x0, t2; \
	vpunpckldq		x3, x2, t1; \
	vpunpckhdq		x3, x2, x3; \
	\
	vpunpcklqdq		t1, t0, x0; \
	vpunpckhqdq		t1, t0, x1; \
	vpunpcklqdq		x3, t2, x2; \
	vpunpckhqdq		x3, t2, x3;

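/*
 * inpack_blocks: byte-swap each 128-bit block into big-endian word order and
 * transpose, so that each xmm register ends up holding the same 32-bit word
 * (A, B, C or D) of four different blocks; outunpack_blocks is the inverse.
 */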
#define inpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	vpshufb rmask, x0,	x0; \
	vpshufb rmask, x1,	x1; \
	vpshufb rmask, x2,	x2; \
	vpshufb rmask, x3,	x3; \
	\
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
	transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
	\
	vpshufb rmask,		x0, x0;       \
	vpshufb rmask,		x1, x1;       \
	vpshufb rmask,		x2, x2;       \
	vpshufb rmask,		x3, x3;

.section	.rodata.cst16, "aM", @progbits, 16
.align 16
.Lxts_gf128mul_and_shl1_mask:
	.byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
.Lbswap_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
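/*
 * Shuffle masks applied to the 16 rotation-key bytes of a four-round group,
 * so that preload_rkr hands them out in the order in which the Q/QBAR
 * sequence of that group consumes them.
 */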
.Lrkr_enc_Q_Q_QBAR_QBAR:
	.byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_dec_Q_Q_Q_Q:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
.Lrkr_dec_Q_Q_QBAR_QBAR:
	.byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

.section	.rodata.cst4.L16_mask, "aM", @progbits, 4
.align 4
.L16_mask:
	.byte 16, 16, 16, 16

.section	.rodata.cst4.L32_mask, "aM", @progbits, 4
.align 4
.L32_mask:
	.byte 32, 0, 0, 0

.section	.rodata.cst4.first_mask, "aM", @progbits, 4
.align 4
.Lfirst_mask:
	.byte 0x1f, 0, 0, 0

.text

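/*
 * __cast6_enc_blk8: the 12 quad-rounds of CAST-256 (6 forward Q followed by
 * 6 reverse QBAR) on 8 blocks in parallel.
 */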
.align 8
__cast6_enc_blk8:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 */

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(0, dummy, none);
	Q(0);
	Q(1);
	Q(2);
	Q(3);
	preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR);
	Q(4);
	Q(5);
	QBAR(6);
	QBAR(7);
	preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR);
	QBAR(8);
	QBAR(9);
	QBAR(10);
	QBAR(11);

	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;

	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
ENDPROC(__cast6_enc_blk8)

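/*
 * __cast6_dec_blk8: the encryption schedule run backwards (Q and QBAR
 * swapped, round keys consumed in reverse order).
 */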
.align 8
__cast6_dec_blk8:
	/* input:
	 *	%rdi: ctx, CTX
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
	 * output:
	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
	 */

	pushq %rbp;
	pushq %rbx;

	vmovdqa .Lbswap_mask, RKM;
	vmovd .Lfirst_mask, R1ST;
	vmovd .L32_mask, R32;

	inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
	Q(11);
	Q(10);
	Q(9);
	Q(8);
	preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR);
	Q(7);
	Q(6);
	QBAR(5);
	QBAR(4);
	preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR);
	QBAR(3);
	QBAR(2);
	QBAR(1);
	QBAR(0);

	popq %rbx;
	popq %rbp;

	vmovdqa .Lbswap_mask, RKM;
	outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

	ret;
ENDPROC(__cast6_dec_blk8)

ENTRY(cast6_ecb_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

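	/* keep dst in %r11; __cast6_enc_blk8 clobbers %rsi (RID2) */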
	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_enc_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(cast6_ecb_enc_8way)

ENTRY(cast6_ecb_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(cast6_ecb_dec_8way)

ENTRY(cast6_cbc_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 */
	FRAME_BEGIN

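	/*
	 * %r11 = dst, %r12 = src; src is needed again after the call for the
	 * CBC chaining xor, and %r12 is callee-saved, hence the push.
	 */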
	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	call __cast6_dec_blk8;

	store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r12;

	FRAME_END
	ret;
ENDPROC(cast6_cbc_dec_8way)

ENTRY(cast6_ctr_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (little endian, 128bit)
	 */
	FRAME_BEGIN

	pushq %r12;

	movq %rsi, %r11;
	movq %rdx, %r12;

	load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
		      RD2, RX, RKR, RKM);

	call __cast6_enc_blk8;

	store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	popq %r12;

	FRAME_END
	ret;
ENDPROC(cast6_ctr_8way)

ENTRY(cast6_xts_enc_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
		      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

	call __cast6_enc_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(cast6_xts_enc_8way)

ENTRY(cast6_xts_dec_8way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst
	 *	%rdx: src
	 *	%rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
	 */
	FRAME_BEGIN

	movq %rsi, %r11;

	/* regs <= src, dst <= IVs, regs <= regs xor IVs */
	load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
		      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

	call __cast6_dec_blk8;

	/* dst <= regs xor IVs(in dst) */
	store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

	FRAME_END
	ret;
ENDPROC(cast6_xts_dec_8way)