/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>

#include "dm.h"

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * per bio private data
 */
struct crypt_io {
	struct dm_target *target;
	struct bio *bio;
	struct bio *first_clone;
	struct work_struct work;
	atomic_t pending;
	int error;
};

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	int write;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
	           const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data and
	 * for encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *page_pool;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	struct crypto_cipher *iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_blkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS        256
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static kmem_cache_t *_crypt_io_pool;

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
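 *        For example, with a 16-byte IV, sector 0x12c produces
 *        2c 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00.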
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
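 *        That is, IV(sector) = E(salt, le64(sector)), where E is the
 *        bulk cipher used as a single-block primitive and
 *        salt = Hash(bulk key).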
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
	                      const char *opts)
{
	struct crypto_cipher *essiv_tfm;
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
	int err;

	if (opts == NULL) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		return PTR_ERR(hash_tfm);
	}

	saltsize = crypto_hash_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		crypto_free_hash(hash_tfm);
		return -ENOMEM;
	}

	sg_set_buf(&sg, cc->key, cc->key_size);
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		return err;
	}

	/* Set up the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return PTR_ERR(essiv_tfm);
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_blkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			        "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return err;
	}
	kfree(salt);

	cc->iv_gen_private = essiv_tfm;
	return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private);
	cc->iv_gen_private = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv);
	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};


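/*
 * Encrypt or decrypt a single scatterlist segment; crypt_convert()
 * below feeds this one 512-byte sector at a time, generating the
 * per-sector IV first whenever an IV mode is configured.
 */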
static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                          struct scatterlist *in, unsigned int length,
                          int write, sector_t sector)
{
	u8 iv[cc->iv_size];
	struct blkcipher_desc desc = {
		.tfm = cc->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
	} else {
		if (write)
			r = crypto_blkcipher_encrypt(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt(&desc, out, in, length);
	}

	return r;
}

static void
crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
                   struct bio *bio_out, struct bio *bio_in,
                   sector_t sector, int write)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	ctx->write = write;
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
{
	int r = 0;

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
		struct scatterlist sg_in = {
			.page = bv_in->bv_page,
			.offset = bv_in->bv_offset + ctx->offset_in,
			.length = 1 << SECTOR_SHIFT
		};
		struct scatterlist sg_out = {
			.page = bv_out->bv_page,
			.offset = bv_out->bv_offset + ctx->offset_out,
			.length = 1 << SECTOR_SHIFT
		};

		ctx->offset_in += sg_in.length;
		if (ctx->offset_in >= bv_in->bv_len) {
			ctx->offset_in = 0;
			ctx->idx_in++;
		}

		ctx->offset_out += sg_out.length;
		if (ctx->offset_out >= bv_out->bv_len) {
			ctx->offset_out = 0;
			ctx->idx_out++;
		}

		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
		                              ctx->write, ctx->sector);
		if (r < 0)
			break;

		ctx->sector++;
	}

	return r;
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *
crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
                   struct bio *base_bio, unsigned int *bio_vec_idx)
{
	struct bio *bio;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned int i;

	/*
	 * Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and
	 * to fail earlier.  This is not necessary but increases throughput.
	 * FIXME: Is this really intelligent?
	 */
	if (base_bio)
		bio = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
	else
		bio = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
	if (!bio)
		return NULL;

	/* if the last bio was not complete, continue where that one ended */
	bio->bi_idx = *bio_vec_idx;
	bio->bi_vcnt = *bio_vec_idx;
	bio->bi_size = 0;
	bio->bi_flags &= ~(1 << BIO_SEG_VALID);

	/* bio->bi_idx pages have already been allocated */
	size -= bio->bi_idx * PAGE_SIZE;

	for(i = bio->bi_idx; i < nr_iovecs; i++) {
		struct bio_vec *bv = bio_iovec_idx(bio, i);

		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!bv->bv_page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if ((i - bio->bi_idx) == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		bv->bv_offset = 0;
		if (size > PAGE_SIZE)
			bv->bv_len = PAGE_SIZE;
		else
			bv->bv_len = size;

		bio->bi_size += bv->bv_len;
		bio->bi_vcnt++;
		size -= bv->bv_len;
	}

	if (!bio->bi_size) {
		bio_put(bio);
		return NULL;
	}

	/*
	 * Remember the last bio_vec allocated to be able
	 * to correctly continue after the splitting.
	 */
	*bio_vec_idx = bio->bi_vcnt;

	return bio;
}

static void crypt_free_buffer_pages(struct crypt_config *cc,
                                    struct bio *bio, unsigned int bytes)
{
	unsigned int i, start, end;
	struct bio_vec *bv;

	/*
	 * This is ugly, but Jens Axboe thinks that using bi_idx in the
	 * endio function is too dangerous at the moment, so I calculate the
	 * correct position using bi_vcnt and bi_size.
	 * The bv_offset and bv_len fields might already be modified but we
	 * know that we always allocated whole pages.
	 * A fix to the bi_idx issue in the kernel is in the works, so
	 * we will hopefully be able to revert to the cleaner solution soon.
	 */
	i = bio->bi_vcnt - 1;
	bv = bio_iovec_idx(bio, i);
	end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - bio->bi_size;
	start = end - bytes;

	start >>= PAGE_SHIFT;
	if (!bio->bi_size)
		end = bio->bi_vcnt;
	else
		end >>= PAGE_SHIFT;

	for(i = start; i < end; i++) {
		bv = bio_iovec_idx(bio, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void dec_pending(struct crypt_io *io, int error)
{
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (error < 0)
		io->error = error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	if (io->first_clone)
		bio_put(io->first_clone);

	bio_endio(io->bio, io->bio->bi_size, io->error);

	mempool_free(io, cc->io_pool);
}

/*
 * kcryptd:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context, so bios returning from read requests get
 * queued here.
 */
static struct workqueue_struct *_kcryptd_workqueue;

static void kcryptd_do_work(void *data)
{
	struct crypt_io *io = (struct crypt_io *) data;
	struct crypt_config *cc = (struct crypt_config *) io->target->private;
	struct convert_context ctx;
	int r;

	crypt_convert_init(cc, &ctx, io->bio, io->bio,
	                   io->bio->bi_sector - io->target->begin, 0);
	r = crypt_convert(cc, &ctx);

	dec_pending(io, r);
}

static void kcryptd_queue_io(struct crypt_io *io)
{
	INIT_WORK(&io->work, kcryptd_do_work, io);
	queue_work(_kcryptd_workqueue, &io->work);
}

/*
 * Decode key from its hex representation
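 * (e.g. "deadbeef" decodes to the bytes 0xde 0xad 0xbe 0xef)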
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for(i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for(i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

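/*
 * Decode and install a key from its hex representation; a key of "-"
 * denotes an empty (zero-length) key. Sets DM_CRYPT_KEY_VALID on
 * success.
 */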
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
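 *
 * An illustrative dmsetup table line (length, key and device are
 * made-up values):
 *   0 409600 crypt aes-cbc-essiv:sha256 <64-hex-digit key> 0 /dev/sdb 0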
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_blkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad1;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad1;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode,
		     cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad1;
	}

	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad1;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>".
	 * See comments at iv code
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad2;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad2;

	cc->iv_size = crypto_blkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
		                  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
			DMWARN("Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad3;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad4;
	}

	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad5;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad5;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad5;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
	                  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad5;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad5;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	ti->private = cc;
	return 0;

bad5:
	mempool_destroy(cc->page_pool);
bad4:
	mempool_destroy(cc->io_pool);
bad3:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad2:
	crypto_free_blkcipher(tfm);
bad1:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
	return -EINVAL;
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
	crypto_free_blkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
}

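/*
 * Per-clone completion handler: releases the encryption pages of (even
 * partially completed) writes and defers decryption of successful reads
 * to the kcryptd workqueue.
 */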
static int crypt_endio(struct bio *bio, unsigned int done, int error)
{
	struct crypt_io *io = (struct crypt_io *) bio->bi_private;
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (bio_data_dir(bio) == WRITE) {
		/*
		 * free the processed pages, even if
		 * it's only a partially completed write
		 */
		crypt_free_buffer_pages(cc, bio, done);
	}

	if (bio->bi_size)
		return 1;

	bio_put(bio);

	/*
	 * successful reads are decrypted by the worker thread
	 */
	if ((bio_data_dir(bio) == READ)
	    && bio_flagged(bio, BIO_UPTODATE)) {
		kcryptd_queue_io(io);
		return 0;
	}

	dec_pending(io, error);
	return error;
}

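/*
 * Create the clone bio that is actually submitted to the underlying
 * device. For writes the clone carries freshly allocated pages holding
 * the already-encrypted data; for reads the original bvec array is
 * copied so the data can be decrypted in place after completion.
 */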
static inline struct bio *
crypt_clone(struct crypt_config *cc, struct crypt_io *io, struct bio *bio,
            sector_t sector, unsigned int *bvec_idx,
            struct convert_context *ctx)
{
	struct bio *clone;

	if (bio_data_dir(bio) == WRITE) {
		clone = crypt_alloc_buffer(cc, bio->bi_size,
		                           io->first_clone, bvec_idx);
		if (clone) {
			ctx->bio_out = clone;
			if (crypt_convert(cc, ctx) < 0) {
				crypt_free_buffer_pages(cc, clone,
				                        clone->bi_size);
				bio_put(clone);
				return NULL;
			}
		}
	} else {
		/*
		 * The block layer might modify the bvec array, so always
		 * copy the required bvecs because we need the original
		 * one in order to decrypt the whole bio data *afterwards*.
		 */
		clone = bio_alloc(GFP_NOIO, bio_segments(bio));
		if (clone) {
			clone->bi_idx = 0;
			clone->bi_vcnt = bio_segments(bio);
			clone->bi_size = bio->bi_size;
			memcpy(clone->bi_io_vec, bio_iovec(bio),
			       sizeof(struct bio_vec) * clone->bi_vcnt);
		}
	}

	if (!clone)
		return NULL;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_sector = cc->start + sector;
	clone->bi_rw = bio->bi_rw;

	return clone;
}

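/*
 * Target map function: writes are encrypted up front and the resulting
 * clones submitted; reads are submitted as-is and decrypted on
 * completion. A large bio may be split into several clones when the
 * page pool runs short.
 */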
static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	struct crypt_io *io;
	struct convert_context ctx;
	struct bio *clone;
	unsigned int remaining = bio->bi_size;
	sector_t sector = bio->bi_sector - ti->begin;
	unsigned int bvec_idx = 0;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->bio = bio;
	io->first_clone = NULL;
	io->error = 0;
	atomic_set(&io->pending, 1); /* hold a reference */

	if (bio_data_dir(bio) == WRITE)
		crypt_convert_init(cc, &ctx, NULL, bio, sector, 1);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_clone(cc, io, bio, sector, &bvec_idx, &ctx);
		if (!clone)
			goto cleanup;

		if (!io->first_clone) {
			/*
			 * hold a reference to the first clone, because it
			 * holds the bio_vec array and that can't be freed
			 * before all other clones are released
			 */
			bio_get(clone);
			io->first_clone = clone;
		}
		atomic_inc(&io->pending);

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		generic_make_request(clone);

		/* out of memory -> run queues */
		if (remaining)
			blk_congestion_wait(bio_data_dir(clone), HZ/100);
	}

	/* drop reference, clones could have returned before we reach this */
	dec_pending(io, 0);
	return 0;

cleanup:
	if (io->first_clone) {
		dec_pending(io, -ENOMEM);
		return 0;
	}

	/* if no bio has been dispatched yet, we can directly return the error */
	mempool_free(io, cc->io_pool);
	return -ENOMEM;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	const char *cipher;
	const char *chainmode = NULL;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		cipher = crypto_blkcipher_name(cc->tfm);

		chainmode = cc->chainmode;

		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode);
		else
			DMEMIT("%s-%s ", cipher, chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
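 *
 * e.g. "dmsetup message <device> 0 key wipe" on a suspended device
 * (the device name is illustrative).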
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 2, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = kmem_cache_create("dm-crypt_io",
	                                   sizeof(struct crypt_io),
	                                   0, 0, NULL, NULL);
	if (!_crypt_io_pool)
		return -ENOMEM;

	_kcryptd_workqueue = create_workqueue("kcryptd");
	if (!_kcryptd_workqueue) {
		r = -ENOMEM;
		DMERR("couldn't create kcryptd");
		goto bad1;
	}

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		goto bad2;
	}

	return 0;

bad2:
	destroy_workqueue(_kcryptd_workqueue);
bad1:
	kmem_cache_destroy(_crypt_io_pool);
	return r;
}

static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR("unregister failed %d", r);

	destroy_workqueue(_kcryptd_workqueue);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");