/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
#include "padlock.h"

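/*
 * The actual hashing is offloaded to the PadLock engine only at
 * finalization; everything else runs through a software fallback whose
 * shash_desc is embedded in ours (padlock_cra_init() grows descsize by
 * the fallback's own).
 */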
struct padlock_sha_desc {
	struct shash_desc fallback;
};

struct padlock_sha_ctx {
	struct crypto_shash *fallback;
};

static int padlock_sha_init(struct shash_desc *desc)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_init(&dctx->fallback);
}

static int padlock_sha_update(struct shash_desc *desc,
			      const u8 *data, unsigned int length)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_update(&dctx->fallback, data, length);
}

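/*
 * The engine leaves the digest as native-endian 32-bit words; SHA-1 and
 * SHA-256 digests are defined big-endian, so swap each word on the way
 * out.
 */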
static inline void padlock_output_block(uint32_t *src,
					uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}

static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
			      unsigned int count, u8 *out)
{
	/*
	 * We can't store directly to *out as it may be unaligned.
	 * Don't reduce the buffer size below 128 bytes: the PadLock
	 * microcode needs it that big.
	 */
	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha1_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

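	/*
	 * The engine takes byte counts in machine registers; if the
	 * total cannot be represented in an unsigned long (possible
	 * only on 32-bit), let the fallback finish the job.
	 */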
	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

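	/*
	 * Bytes the fallback has buffered from the last, partial block.
	 * A count on a block boundary means nothing is buffered, so
	 * leftover is a whole block and space is zero.
	 */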
	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
	space = SHA1_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buffer + leftover, in, count);
			in = state.buffer;
			count += leftover;
			state.count &= ~(SHA1_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA1_DIGEST_SIZE);

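	/*
	 * Hand the engine the intermediate state in "result" (EDI), the
	 * remaining input (ESI), the bytes hashed so far (EAX) and the
	 * total message length (ECX); it completes the hash, including
	 * the length padding, in place.
	 */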
	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
106
		      : \
107 108
		      : "c"((unsigned long)state.count + count), \
			"a"((unsigned long)state.count), \
109
			"S"(in), "D"(result));
110
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

out:
	return err;
}

static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
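	/* Zero-length finup: buf is never read, it only satisfies the API. */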
	u8 buf[4];

	return padlock_sha1_finup(desc, buf, 0, out);
}

static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
				unsigned int count, u8 *out)
{
	/*
	 * We can't store directly to *out as it may be unaligned.
	 * Don't reduce the buffer size below 128 bytes: the PadLock
	 * microcode needs it that big.
	 */
	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha256_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
	space = SHA256_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buf + leftover, in, count);
			in = state.buf;
			count += leftover;
			state.count &= ~(SHA256_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA256_DIGEST_SIZE);

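	/*
	 * Same operand contract as the XSHA1 call in padlock_sha1_finup(),
	 * only with the XSHA256 opcode.
	 */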
	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
170
		      : \
171 172
		      : "c"((unsigned long)state.count + count), \
			"a"((unsigned long)state.count), \
173
			"S"(in), "D"(result));
174
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

out:
	return err;
}

static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha256_finup(desc, buf, 0, out);
}

static int padlock_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_shash *hash = __crypto_shash_cast(tfm);
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm;
	int err = -ENOMEM;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	ctx->fallback = fallback_tfm;
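	/* Reserve room behind our descriptor for the fallback's state. */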
	hash->descsize += crypto_shash_descsize(fallback_tfm);
	return 0;

out:
	return err;
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
}

static struct shash_alg sha1_alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	padlock_sha_init,
	.update 	=	padlock_sha_update,
	.finup  	=	padlock_sha1_finup,
	.final  	=	padlock_sha1_final,
	.descsize	=	sizeof(struct padlock_sha_desc),
	.base		=	{
		.cra_name		=	"sha1",
		.cra_driver_name	=	"sha1-padlock",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		=	SHA1_BLOCK_SIZE,
		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
		.cra_module		=	THIS_MODULE,
		.cra_init		=	padlock_cra_init,
		.cra_exit		=	padlock_cra_exit,
	}
};

static struct shash_alg sha256_alg = {
	.digestsize	=	SHA256_DIGEST_SIZE,
	.init		=	padlock_sha_init,
	.update 	=	padlock_sha_update,
	.finup  	=	padlock_sha256_finup,
	.final  	=	padlock_sha256_final,
	.descsize	=	sizeof(struct padlock_sha_desc),
	.base		=	{
		.cra_name		=	"sha256",
		.cra_driver_name	=	"sha256-padlock",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		=	SHA256_BLOCK_SIZE,
		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
		.cra_module		=	THIS_MODULE,
		.cra_init		=	padlock_cra_init,
		.cra_exit		=	padlock_cra_exit,
	}
};
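
/*
 * Usage sketch (not part of this driver): once registered, these
 * transforms are reached through the generic shash API, and "sha1"
 * resolves to sha1-padlock whenever this module's priority wins.
 * A minimal caller, error handling omitted, might look like:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
 *	struct shash_desc *desc = kmalloc(sizeof(*desc) +
 *			crypto_shash_descsize(tfm), GFP_KERNEL);
 *	u8 digest[SHA1_DIGEST_SIZE];
 *
 *	desc->tfm = tfm;
 *	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *	crypto_shash_digest(desc, data, len, digest);
 *
 *	kfree(desc);
 *	crypto_free_shash(tfm);
 */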

static int __init padlock_init(void)
{
	int rc = -ENODEV;

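	/*
	 * PHE is the PadLock Hash Engine; the second flag reports
	 * whether firmware has actually enabled it.
	 */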
	if (!cpu_has_phe) {
		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	rc = crypto_register_shash(&sha1_alg);
	if (rc)
		goto out;

	rc = crypto_register_shash(&sha256_alg);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_shash(&sha1_alg);
out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_shash(&sha1_alg);
	crypto_unregister_shash(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");