/*
 * aesce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

	.text
	.arch	armv8-a+crypto

	/*
	 * void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
	 *			     u32 *macp, u8 const rk[], u32 rounds);
	 *
	 * Fold abytes of associated data into the CBC-MAC using the AES
	 * Crypto Extensions.  *macp carries the byte count of a partial
	 * block left over from a previous invocation (0 if none).
	 *
	 * x0 - mac buffer (16 bytes, updated in place)
	 * x1 - input data
	 * w2 - number of input bytes
	 * x3 - pointer to partial-block byte count (read and updated)
	 * x4 - AES round keys
	 * w5 - number of rounds (10/12/14 for AES-128/192/256)
	 */
ENTRY(ce_aes_ccm_auth_data)
	ldr	w8, [x3]			/* leftover from prev round? */
	ld1	{v0.16b}, [x0]			/* load mac */
	cbz	w8, 1f				/* no partial block: full blocks */
	sub	w8, w8, #16
	eor	v1.16b, v1.16b, v1.16b		/* clear partial-block staging reg */
0:	ldrb	w7, [x1], #1			/* get 1 byte of input */
	subs	w2, w2, #1
	add	w8, w8, #1
	ins	v1.b[0], w7
	ext	v1.16b, v1.16b, v1.16b, #1	/* rotate in the input bytes */
	beq	8f				/* out of input? */
	cbnz	w8, 0b				/* block not yet complete? */
	eor	v0.16b, v0.16b, v1.16b		/* fold completed block into mac */
1:	ld1	{v3.4s}, [x4]			/* load first round key */
	prfm	pldl1strm, [x1]
	cmp	w5, #12				/* which key size? */
	add	x6, x4, #16
	sub	w7, w5, #2			/* modified # of rounds */
	bmi	2f				/* AES-128: 10 rounds */
	bne	5f				/* AES-256: 14 rounds */
	mov	v5.16b, v3.16b			/* AES-192: 12 rounds */
	b	4f
2:	mov	v4.16b, v3.16b
	ld1	{v5.4s}, [x6], #16		/* load 2nd round key */
3:	aese	v0.16b, v4.16b
	aesmc	v0.16b, v0.16b
4:	ld1	{v3.4s}, [x6], #16		/* load next round key */
	aese	v0.16b, v5.16b
	aesmc	v0.16b, v0.16b
5:	ld1	{v4.4s}, [x6], #16		/* load next round key */
	subs	w7, w7, #3
	aese	v0.16b, v3.16b
	aesmc	v0.16b, v0.16b
	ld1	{v5.4s}, [x6], #16		/* load next round key */
	bpl	3b				/* 3 rounds per iteration */
	aese	v0.16b, v4.16b
	subs	w2, w2, #16			/* last data? */
	eor	v0.16b, v0.16b, v5.16b		/* final round */
	bmi	6f				/* < 16 bytes left: partial tail */
	ld1	{v1.16b}, [x1], #16		/* load next input block */
	eor	v0.16b, v0.16b, v1.16b		/* xor with mac */
	bne	1b				/* more full blocks to go */
6:	st1	{v0.16b}, [x0]			/* store mac */
	beq	10f				/* input ended on block boundary */
	adds	w2, w2, #16			/* recover tail byte count */
	beq	10f
	mov	w8, w2				/* record partial count in w8 */
7:	ldrb	w7, [x1], #1			/* fold tail bytes one at a time */
	umov	w6, v0.b[0]
	eor	w6, w6, w7
	strb	w6, [x0], #1
	subs	w2, w2, #1
	beq	10f
	ext	v0.16b, v0.16b, v0.16b, #1	/* rotate out the mac bytes */
	b	7b
8:	mov	w7, w8				/* input ran out mid-partial-block */
	add	w8, w8, #16
9:	ext	v1.16b, v1.16b, v1.16b, #1	/* finish rotating partial block */
	adds	w7, w7, #1
	bne	9b
	eor	v0.16b, v0.16b, v1.16b		/* fold partial block into mac */
	st1	{v0.16b}, [x0]
10:	str	w8, [x3]			/* save partial count for next call */
	ret
ENDPROC(ce_aes_ccm_auth_data)

	/*
	 * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[],
	 * 			 u32 rounds);
	 *
	 * Encrypt the CBC-MAC and the counter IV in parallel (2x
	 * interleaved), then xor them to produce the final CCM tag.
	 *
	 * x0 - mac buffer (16 bytes, updated in place with the result)
	 * x1 - counter IV (16 bytes)
	 * x2 - AES round keys
	 * w3 - number of rounds (10/12/14 for AES-128/192/256)
	 */
ENTRY(ce_aes_ccm_final)
	ld1	{v3.4s}, [x2], #16		/* load first round key */
	ld1	{v0.16b}, [x0]			/* load mac */
	cmp	w3, #12				/* which key size? */
	sub	w3, w3, #2			/* modified # of rounds */
	ld1	{v1.16b}, [x1]			/* load 1st ctriv */
	bmi	0f				/* AES-128: 10 rounds */
	bne	3f				/* AES-256: 14 rounds */
	mov	v5.16b, v3.16b			/* AES-192: 12 rounds */
	b	2f
0:	mov	v4.16b, v3.16b
1:	ld1	{v5.4s}, [x2], #16		/* load next round key */
	aese	v0.16b, v4.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v4.16b
	aesmc	v1.16b, v1.16b
2:	ld1	{v3.4s}, [x2], #16		/* load next round key */
	aese	v0.16b, v5.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v5.16b
	aesmc	v1.16b, v1.16b
3:	ld1	{v4.4s}, [x2], #16		/* load next round key */
	subs	w3, w3, #3
	aese	v0.16b, v3.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v3.16b
	aesmc	v1.16b, v1.16b
	bpl	1b				/* 3 rounds per iteration */
	aese	v0.16b, v4.16b
	aese	v1.16b, v4.16b
	/* final round key cancels out */
	eor	v0.16b, v0.16b, v1.16b		/* en-/decrypt the mac */
	st1	{v0.16b}, [x0]			/* store result */
	ret
ENDPROC(ce_aes_ccm_final)

	/*
	 * Shared body for ce_aes_ccm_encrypt/ce_aes_ccm_decrypt (see the
	 * prototypes below for the register assignment):
	 *
	 * x0 - output buffer
	 * x1 - input buffer
	 * w2 - number of bytes to process
	 * x3 - AES round keys
	 * w4 - number of rounds
	 * x5 - mac buffer (updated in place)
	 * x6 - counter block (lower 64 bits incremented per block)
	 *
	 * \enc == 1 selects the encrypt data flow, \enc == 0 the decrypt
	 * one; both run the AES rounds on the mac and the counter block
	 * 2x interleaved.
	 */
	.macro	aes_ccm_do_crypt,enc
	ldr	x8, [x6, #8]			/* load lower ctr */
	ld1	{v0.16b}, [x5]			/* load mac */
CPU_LE(	rev	x8, x8			)	/* keep swabbed ctr in reg */
0:	/* outer loop */
	ld1	{v1.8b}, [x6]			/* load upper ctr */
	prfm	pldl1strm, [x1]
	add	x8, x8, #1
	rev	x9, x8
	cmp	w4, #12				/* which key size? */
	sub	w7, w4, #2			/* get modified # of rounds */
	ins	v1.d[1], x9			/* no carry in lower ctr */
	ld1	{v3.4s}, [x3]			/* load first round key */
	add	x10, x3, #16
	bmi	1f				/* AES-128: 10 rounds */
	bne	4f				/* AES-256: 14 rounds */
	mov	v5.16b, v3.16b			/* AES-192: 12 rounds */
	b	3f
1:	mov	v4.16b, v3.16b
	ld1	{v5.4s}, [x10], #16		/* load 2nd round key */
2:	/* inner loop: 3 rounds, 2x interleaved */
	aese	v0.16b, v4.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v4.16b
	aesmc	v1.16b, v1.16b
3:	ld1	{v3.4s}, [x10], #16		/* load next round key */
	aese	v0.16b, v5.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v5.16b
	aesmc	v1.16b, v1.16b
4:	ld1	{v4.4s}, [x10], #16		/* load next round key */
	subs	w7, w7, #3
	aese	v0.16b, v3.16b
	aesmc	v0.16b, v0.16b
	aese	v1.16b, v3.16b
	aesmc	v1.16b, v1.16b
	ld1	{v5.4s}, [x10], #16		/* load next round key */
	bpl	2b
	aese	v0.16b, v4.16b
	aese	v1.16b, v4.16b
	subs	w2, w2, #16
	bmi	6f				/* partial block? */
	ld1	{v2.16b}, [x1], #16		/* load next input block */
	.if	\enc == 1
	eor	v2.16b, v2.16b, v5.16b		/* final round enc+mac */
	eor	v1.16b, v1.16b, v2.16b		/* xor with crypted ctr */
	.else
	eor	v2.16b, v2.16b, v1.16b		/* xor with crypted ctr */
	eor	v1.16b, v2.16b, v5.16b		/* final round enc */
	.endif
	eor	v0.16b, v0.16b, v2.16b		/* xor mac with pt ^ rk[last] */
	st1	{v1.16b}, [x0], #16		/* write output block */
	bne	0b				/* more blocks to go */
CPU_LE(	rev	x8, x8			)
	st1	{v0.16b}, [x5]			/* store mac */
	str	x8, [x6, #8]			/* store lsb end of ctr (BE) */
5:	ret

6:	eor	v0.16b, v0.16b, v5.16b		/* final round mac */
	eor	v1.16b, v1.16b, v5.16b		/* final round enc */
	st1	{v0.16b}, [x5]			/* store mac */
	add	w2, w2, #16			/* process partial tail block */
7:	ldrb	w9, [x1], #1			/* get 1 byte of input */
	umov	w6, v1.b[0]			/* get top crypted ctr byte */
	umov	w7, v0.b[0]			/* get top mac byte */
	.if	\enc == 1
	eor	w7, w7, w9			/* mac absorbs plaintext byte */
	eor	w9, w9, w6			/* then encrypt it */
	.else
	eor	w9, w9, w6			/* decrypt ciphertext byte */
	eor	w7, w7, w9			/* mac absorbs plaintext byte */
	.endif
	strb	w9, [x0], #1			/* store out byte */
	strb	w7, [x5], #1			/* store mac byte */
	subs	w2, w2, #1
	beq	5b
	ext	v0.16b, v0.16b, v0.16b, #1	/* shift out mac byte */
	ext	v1.16b, v1.16b, v1.16b, #1	/* shift out ctr byte */
	b	7b
	.endm

	/*
	 * void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
	 * 			   u8 const rk[], u32 rounds, u8 mac[],
	 * 			   u8 ctr[]);
	 * void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
	 * 			   u8 const rk[], u32 rounds, u8 mac[],
	 * 			   u8 ctr[]);
	 *
	 * Both entry points expand the shared aes_ccm_do_crypt macro; the
	 * operand selects the \enc == 1 (encrypt) or \enc == 0 (decrypt)
	 * data flow.
	 */
ENTRY(ce_aes_ccm_encrypt)
	aes_ccm_do_crypt	1
ENDPROC(ce_aes_ccm_encrypt)

ENTRY(ce_aes_ccm_decrypt)
	aes_ccm_do_crypt	0
ENDPROC(ce_aes_ccm_decrypt)