/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
#include "padlock.h"

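/*
 * The compiler guarantees only STACK_ALIGN bytes of stack alignment
 * (4 on 32-bit, 16 on 64-bit x86), so the on-stack result buffers
 * below are over-allocated and aligned by hand with PTR_ALIGN().
 */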
#ifdef CONFIG_64BIT
#define STACK_ALIGN 16
#else
#define STACK_ALIGN 4
#endif

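/*
 * Per-request state: the fallback's shash_desc is embedded here; its
 * private state follows it (see padlock_cra_init(), which grows
 * descsize by crypto_shash_descsize(fallback)).
 */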
struct padlock_sha_desc {
	struct shash_desc fallback;
};

struct padlock_sha_ctx {
	struct crypto_shash *fallback;
};

static int padlock_sha_init(struct shash_desc *desc)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_init(&dctx->fallback);
}

static int padlock_sha_update(struct shash_desc *desc,
			      const u8 *data, unsigned int length)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_update(&dctx->fallback, data, length);
}

static int padlock_sha_export(struct shash_desc *desc, void *out)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	return crypto_shash_export(&dctx->fallback, out);
}

static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_import(&dctx->fallback, in);
}

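/*
 * Copy count 32-bit words from src to dst, byte-swapping each one:
 * the intermediate state is held as host-endian (little-endian)
 * words, while the canonical SHA digest is big-endian.
 */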
static inline void padlock_output_block(uint32_t *src,
					uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}

static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
			      unsigned int count, u8 *out)
{
	/*
	 * We can't store directly to *out, as it may be unaligned.
	 * Note: don't shrink this buffer below 128 bytes; the PadLock
	 * microcode needs it that big.
	 */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha1_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

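	/*
	 * The hardware takes the total length as a machine word (ECX
	 * below); punt everything to the fallback if it would not fit.
	 */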
	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

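	/*
	 * The engine resumes from a block-aligned intermediate state.  If
	 * the fallback has buffered a partial block, either complete that
	 * block through the fallback and re-export the state, or fold the
	 * remaining input into the buffered bytes and round state.count
	 * down to the preceding block boundary.
	 */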
	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
	space = SHA1_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buffer + leftover, in, count);
			in = state.buffer;
			count += leftover;
			state.count &= ~(SHA1_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA1_DIGEST_SIZE);

	/* Prevent taking a spurious DNA fault while PadLock executes. */
	ts_state = irq_ts_save();
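	/*
	 * rep xsha1: ESI = input, EDI = digest/state buffer, ECX = total
	 * message length in bytes, EAX = bytes already hashed; the latter
	 * presumably lets the engine generate the final length padding.
	 */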
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      :
		      : "c"((unsigned long)state.count + count),
			"a"((unsigned long)state.count),
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

out:
	return err;
}

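/* Finalize via a zero-length finup; the 4-byte buf is never actually read. */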
static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha1_finup(desc, buf, 0, out);
}

static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
				unsigned int count, u8 *out)
{
	/*
	 * We can't store directly to *out, as it may be unaligned.
	 * Note: don't shrink this buffer below 128 bytes; the PadLock
	 * microcode needs it that big.
	 */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha256_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

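	/* Same block-boundary handling as in padlock_sha1_finup(). */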
	leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
	space = SHA256_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buf + leftover, in, count);
			in = state.buf;
			count += leftover;
			state.count &= ~(SHA256_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA256_DIGEST_SIZE);

	/* Prevent taking a spurious DNA fault while PadLock executes. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      :
		      : "c"((unsigned long)state.count + count),
			"a"((unsigned long)state.count),
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

out:
	return err;
}

static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha256_finup(desc, buf, 0, out);
}

static int padlock_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_shash *hash = __crypto_shash_cast(tfm);
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm;
	int err = -ENOMEM;

	/* Allocate a fallback and abort if it fails. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

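	/* Make room for the fallback's own descriptor state after ours. */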
	ctx->fallback = fallback_tfm;
	hash->descsize += crypto_shash_descsize(fallback_tfm);
	return 0;

out:
	return err;
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
}

static struct shash_alg sha1_alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	padlock_sha_init,
	.update 	=	padlock_sha_update,
	.finup  	=	padlock_sha1_finup,
	.final  	=	padlock_sha1_final,
	.export		=	padlock_sha_export,
	.import		=	padlock_sha_import,
	.descsize	=	sizeof(struct padlock_sha_desc),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name		=	"sha1",
		.cra_driver_name	=	"sha1-padlock",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		=	SHA1_BLOCK_SIZE,
		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
		.cra_module		=	THIS_MODULE,
		.cra_init		=	padlock_cra_init,
		.cra_exit		=	padlock_cra_exit,
	}
};

static struct shash_alg sha256_alg = {
	.digestsize	=	SHA256_DIGEST_SIZE,
	.init		=	padlock_sha_init,
	.update 	=	padlock_sha_update,
	.finup  	=	padlock_sha256_finup,
	.final  	=	padlock_sha256_final,
	.export		=	padlock_sha_export,
	.import		=	padlock_sha_import,
	.descsize	=	sizeof(struct padlock_sha_desc),
	.statesize	=	sizeof(struct sha256_state),
	.base		=	{
		.cra_name		=	"sha256",
		.cra_driver_name	=	"sha256-padlock",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		=	SHA256_BLOCK_SIZE,
		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
		.cra_module		=	THIS_MODULE,
		.cra_init		=	padlock_cra_init,
		.cra_exit		=	padlock_cra_exit,
	}
};

static int __init padlock_init(void)
{
	int rc = -ENODEV;

	if (!cpu_has_phe) {
		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	rc = crypto_register_shash(&sha1_alg);
	if (rc)
		goto out;

	rc = crypto_register_shash(&sha256_alg);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_shash(&sha1_alg);
out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_shash(&sha1_alg);
	crypto_unregister_shash(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");