/**
 * AES CBC routines supporting VMX instructions on the Power 8
 *
 * Copyright (C) 2015 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
 */

#include <linux/types.h>
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>

#include "aesp8-ppc.h"
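
/*
 * Every call into the aesp8-ppc assembly runs with preemption and page
 * faults disabled and the VSX unit explicitly enabled, because kernel
 * code may not normally touch the vector registers.  When the cipher is
 * invoked from interrupt context, the software skcipher fallback held
 * in the context below is used instead.
 */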

struct p8_aes_cbc_ctx {
	struct crypto_skcipher *fallback;
	struct aes_key enc_key;
	struct aes_key dec_key;
};

static int p8_aes_cbc_init(struct crypto_tfm *tfm)
{
	const char *alg = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *fallback;
	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

	fallback = crypto_alloc_skcipher(alg, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(fallback)) {
		printk(KERN_ERR
		       "Failed to allocate transformation for '%s': %ld\n",
		       alg, PTR_ERR(fallback));
		return PTR_ERR(fallback);
	}

	crypto_skcipher_set_flags(
		fallback,
		crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
	ctx->fallback = fallback;

	return 0;
}

static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
{
	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->fallback) {
		crypto_free_skcipher(ctx->fallback);
		ctx->fallback = NULL;
	}
}

/*
 * Expand the key both for the VMX assembly (which expects the key
 * length in bits) and for the software fallback.
 */
static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
			     unsigned int keylen)
{
	int ret;
	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();
	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
	ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();

	ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
	return ret ? -EINVAL : 0;
}

/*
 * From interrupt context the VSX unit cannot safely be used, so the
 * request is handed to the software skcipher fallback.  Otherwise the
 * blkcipher walk feeds whole AES blocks to the aesp8-ppc assembly with
 * the vector unit temporarily enabled.
 */
static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src, unsigned int nbytes)
{
	int ret;
	struct blkcipher_walk walk;
	struct p8_aes_cbc_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));

	if (in_interrupt()) {
		SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
		skcipher_request_set_tfm(req, ctx->fallback);
		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
		ret = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	} else {
		blkcipher_walk_init(&walk, dst, src, nbytes);
		ret = blkcipher_walk_virt(desc, &walk);
		while ((nbytes = walk.nbytes)) {
			preempt_disable();
			pagefault_disable();
			enable_kernel_vsx();
			aes_p8_cbc_encrypt(walk.src.virt.addr,
					   walk.dst.virt.addr,
					   nbytes & AES_BLOCK_MASK,
					   &ctx->enc_key, walk.iv, 1);
			disable_kernel_vsx();
			pagefault_enable();
			preempt_enable();

			nbytes &= AES_BLOCK_SIZE - 1;
			ret = blkcipher_walk_done(desc, &walk, nbytes);
		}
	}

	return ret;
}

/*
 * Mirror image of p8_aes_cbc_encrypt(): the same fallback rule applies,
 * but the assembly is called with the decryption key schedule and the
 * encrypt flag cleared.
 */
static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src, unsigned int nbytes)
{
	int ret;
	struct blkcipher_walk walk;
	struct p8_aes_cbc_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));

	if (in_interrupt()) {
		SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
		skcipher_request_set_tfm(req, ctx->fallback);
		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
		ret = crypto_skcipher_decrypt(req);
		skcipher_request_zero(req);
	} else {
		blkcipher_walk_init(&walk, dst, src, nbytes);
		ret = blkcipher_walk_virt(desc, &walk);
		while ((nbytes = walk.nbytes)) {
			preempt_disable();
			pagefault_disable();
			enable_kernel_vsx();
			aes_p8_cbc_encrypt(walk.src.virt.addr,
					   walk.dst.virt.addr,
					   nbytes & AES_BLOCK_MASK,
					   &ctx->dec_key, walk.iv, 0);
			disable_kernel_vsx();
			pagefault_enable();
			preempt_enable();

			nbytes &= AES_BLOCK_SIZE - 1;
			ret = blkcipher_walk_done(desc, &walk, nbytes);
		}
	}

	return ret;
}


struct crypto_alg p8_aes_cbc_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "p8_aes_cbc",
	.cra_module = THIS_MODULE,
	.cra_priority = 2000,
	.cra_type = &crypto_blkcipher_type,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
	.cra_alignmask = 0,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
	.cra_init = p8_aes_cbc_init,
	.cra_exit = p8_aes_cbc_exit,
	.cra_blkcipher = {
			  .ivsize = AES_BLOCK_SIZE,
			  .min_keysize = AES_MIN_KEY_SIZE,
			  .max_keysize = AES_MAX_KEY_SIZE,
			  .setkey = p8_aes_cbc_setkey,
			  .encrypt = p8_aes_cbc_encrypt,
			  .decrypt = p8_aes_cbc_decrypt,
	},
};
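
/*
 * Registration is not done here.  In the vmx driver the module init
 * code (vmx.c) is expected to register this algorithm, roughly:
 *
 *	crypto_register_alg(&p8_aes_cbc_alg);
 *
 * after which the implementation is selectable as "cbc(aes)" with
 * driver name "p8_aes_cbc" at priority 2000.
 */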