/* 
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include "padlock.h"

/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));

/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries and
 * that the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter but the HW reads
 * more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};

static DEFINE_PER_CPU(struct cword *, last_cword);

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	         as it's possible that the capability will be
	         added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

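/*
 * The crypto API only guarantees crypto_tfm_ctx_alignment() for the tfm
 * context, which may be smaller than PADLOCK_ALIGNMENT, so round the
 * context pointer up by hand when necessary.
 */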
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
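	/*
	 * The key just changed; drop any per-cpu "last control word" hints
	 * that point at this context so padlock_reset_key() forces a reload
	 * on the next operation.
	 */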
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(last_cword, cpu))
			per_cpu(last_cword, cpu) = NULL;

	return 0;
}

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
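/*
 * The xcrypt instructions cache the key material they last loaded; writing
 * EFLAGS (the pushf/popf pair below) forces them to re-read the key and
 * control word from memory.  The per-cpu last_cword pointer lets us skip
 * that reload when the same control word is used back to back.
 */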
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

	if (cword != per_cpu(last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when cr0.ts is '1'. These instructions
 * should be used only inside the irq_ts_save/restore() context.
 */

static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				 u8 *iv, struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}

static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}

static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
	 * We could avoid some copying here but it's probably not worth it.
	 */
	if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
		ecb_crypt_copy(in, out, key, cword, count);
		return;
	}

	rep_xcrypt_ecb(in, out, key, cword, count);
}

static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
{
	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
	if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
		return cbc_crypt_copy(in, out, key, iv, cword, count);

	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}

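/*
 * Bulk ECB helper: process the odd remainder ("initial") first, then the
 * rest as an exact multiple of ecb_fetch_blocks, so that the extra blocks
 * prefetched by errata-affected CPUs always fall inside the caller's buffer.
 */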
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);

	if (count < ecb_fetch_blocks) {
		ecb_crypt(input, output, key, control_word, count);
		return;
	}

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count - initial));
}

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	u32 initial = count & (cbc_fetch_blocks - 1);

	if (count < cbc_fetch_blocks)
		return cbc_crypt(input, output, key, iv, control_word, count);

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count - initial));

	return iv;
}

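/* Single-block cipher entry points used by the "aes" alg below; the
 * ECB/CBC blkcipher paths further down walk whole scatterlists. */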
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	aes_encrypt,
			.cia_decrypt		=	aes_decrypt,
		}
	}
};

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.decrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.setkey	   		= 	aes_set_key,
			.encrypt		=	ecb_aes_encrypt,
			.decrypt		=	ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey	   		= 	aes_set_key,
			.encrypt		=	cbc_aes_encrypt,
			.decrypt		=	cbc_aes_decrypt,
		}
	}
};

static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!cpu_has_xcrypt) {
		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)))
		goto aes_err;

	if ((ret = crypto_register_alg(&ecb_aes_alg)))
		goto ecb_aes_err;

	if ((ret = crypto_register_alg(&cbc_aes_alg)))
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("aes");