dm-crypt.c 25.8 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3
/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
M
Milan Broz 已提交
4
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
L
Linus Torvalds 已提交
5 6 7 8
 *
 * This file is released under the GPL.
 */

9
#include <linux/err.h>
L
Linus Torvalds 已提交
10 11 12 13 14 15 16 17 18
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
19
#include <linux/backing-dev.h>
L
Linus Torvalds 已提交
20
#include <asm/atomic.h>
21
#include <linux/scatterlist.h>
L
Linus Torvalds 已提交
22
#include <asm/page.h>
23
#include <asm/unaligned.h>
L
Linus Torvalds 已提交
24 25 26

#include "dm.h"

27
#define DM_MSG_PREFIX "crypt"
M
Milan Broz 已提交
28
#define MESG_STR(x) x, sizeof(x)
L
Linus Torvalds 已提交
29 30 31 32 33 34

/*
 * per bio private data
 */
struct crypt_io {
	struct dm_target *target;
35
	struct bio *base_bio;
L
Linus Torvalds 已提交
36 37 38 39
	struct bio *first_clone;
	struct work_struct work;
	atomic_t pending;
	int error;
40
	int post_process;
L
Linus Torvalds 已提交
41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70
};

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct bio *bio_in;	/* source bio */
	struct bio *bio_out;	/* destination bio (may be the same as bio_in) */
	unsigned int offset_in;	/* byte offset into the current input bio_vec */
	unsigned int offset_out;	/* byte offset into the current output bio_vec */
	unsigned int idx_in;	/* index of the current input bio_vec */
	unsigned int idx_out;	/* index of the current output bio_vec */
	sector_t sector;	/* sector used as IV input for the next block */
	int write;	/* non-zero = encrypt, zero = decrypt */
};

struct crypt_config;

struct crypt_iv_operations {
	/* parse iv-mode options and set up per-instance state (optional) */
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
	           const char *opts);
	/* release state set up by ctr (optional) */
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	/* fill iv (cc->iv_size bytes) for the given sector */
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
M
Milan Broz 已提交
71
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
L
Linus Torvalds 已提交
72 73 74 75 76 77 78 79 80 81
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data and
	 * for encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *page_pool;
M
Milan Broz 已提交
82
	struct bio_set *bs;
L
Linus Torvalds 已提交
83 84 85 86 87 88

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
89
	struct crypto_cipher *iv_gen_private;
L
Linus Torvalds 已提交
90 91 92
	sector_t iv_offset;
	unsigned int iv_size;

93 94 95
	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_blkcipher *tfm;
M
Milan Broz 已提交
96
	unsigned long flags;
L
Linus Torvalds 已提交
97 98 99 100
	unsigned int key_size;
	u8 key[0];
};

M
Milan Broz 已提交
101
#define MIN_IOS        16
L
Linus Torvalds 已提交
102 103 104 105 106 107 108 109
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static kmem_cache_t *_crypt_io_pool;

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

/*
 * "plain" IV: low 32 bits of the sector number, little-endian,
 * zero-padded to the full IV size.
 */
static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	u32 sec32 = cpu_to_le32(sector & 0xffffffff);

	memset(iv, 0, cc->iv_size);
	memcpy(iv, &sec32, sizeof(sec32));

	return 0;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
	                      const char *opts)
{
135
	struct crypto_cipher *essiv_tfm;
136 137
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
L
Linus Torvalds 已提交
138 139 140
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
141
	int err;
L
Linus Torvalds 已提交
142 143

	if (opts == NULL) {
144
		ti->error = "Digest algorithm missing for ESSIV mode";
L
Linus Torvalds 已提交
145 146 147 148
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
149 150
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
151
		ti->error = "Error initializing ESSIV hash";
152
		return PTR_ERR(hash_tfm);
L
Linus Torvalds 已提交
153 154
	}

155
	saltsize = crypto_hash_digestsize(hash_tfm);
L
Linus Torvalds 已提交
156 157
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
158
		ti->error = "Error kmallocing salt storage in ESSIV";
159
		crypto_free_hash(hash_tfm);
L
Linus Torvalds 已提交
160 161 162
		return -ENOMEM;
	}

163
	sg_set_buf(&sg, cc->key, cc->key_size);
164 165 166 167 168 169 170 171 172
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		return err;
	}
L
Linus Torvalds 已提交
173 174

	/* Setup the essiv_tfm with the given salt */
175 176
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
177
		ti->error = "Error allocating crypto tfm for ESSIV";
L
Linus Torvalds 已提交
178
		kfree(salt);
179
		return PTR_ERR(essiv_tfm);
L
Linus Torvalds 已提交
180
	}
181 182
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_blkcipher_ivsize(cc->tfm)) {
183
		ti->error = "Block size of ESSIV cipher does "
L
Linus Torvalds 已提交
184
			        "not match IV size of block cipher";
185
		crypto_free_cipher(essiv_tfm);
L
Linus Torvalds 已提交
186 187 188
		kfree(salt);
		return -EINVAL;
	}
189 190
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
191
		ti->error = "Failed to set key for ESSIV cipher";
192
		crypto_free_cipher(essiv_tfm);
L
Linus Torvalds 已提交
193
		kfree(salt);
194
		return err;
L
Linus Torvalds 已提交
195 196 197
	}
	kfree(salt);

198
	cc->iv_gen_private = essiv_tfm;
L
Linus Torvalds 已提交
199 200 201 202 203
	return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
204
	crypto_free_cipher(cc->iv_gen_private);
L
Linus Torvalds 已提交
205 206 207 208 209 210 211
	cc->iv_gen_private = NULL;
}

/*
 * ESSIV IV: encrypt the little-endian sector number (zero-padded to
 * the IV size) with the salt-keyed ESSIV cipher, in place.
 */
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv);
	return 0;
}

216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253
/*
 * benbi setup: work out how far the 512-byte sector count must be
 * shifted to yield the cipher-block count; crypt_iv_benbi_gen applies
 * this shift.  The shift is smuggled through the iv_gen_private pointer.
 */
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned int blocksize = crypto_blkcipher_blocksize(cc->tfm);
	int shift = long_log2(blocksize);

	if (blocksize != (1 << shift)) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}
	if (shift > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private = (void *)(9 - shift);
	return 0;
}

/* benbi allocated nothing in ctr; just clear the stashed shift count. */
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
	cc->iv_gen_private = NULL;
}

/*
 * benbi IV: big-endian block count (starting at 1) placed in the last
 * 64 bits of the IV; leading bytes are zeroed.
 */
static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size - sizeof(u64)); /* trailing u64 written below */
	put_unaligned(cpu_to_be64(((u64)sector << (u32)cc->iv_gen_private) + 1),
		      (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

L
Linus Torvalds 已提交
254 255 256 257 258 259 260 261 262 263
static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

264 265 266 267 268
static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};
L
Linus Torvalds 已提交
269

270
static int
L
Linus Torvalds 已提交
271 272 273 274 275
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                          struct scatterlist *in, unsigned int length,
                          int write, sector_t sector)
{
	u8 iv[cc->iv_size];
276 277 278 279 280
	struct blkcipher_desc desc = {
		.tfm = cc->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
L
Linus Torvalds 已提交
281 282 283 284 285 286 287 288
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
289
			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
L
Linus Torvalds 已提交
290
		else
291
			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
L
Linus Torvalds 已提交
292 293
	} else {
		if (write)
294
			r = crypto_blkcipher_encrypt(&desc, out, in, length);
L
Linus Torvalds 已提交
295
		else
296
			r = crypto_blkcipher_decrypt(&desc, out, in, length);
L
Linus Torvalds 已提交
297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362
	}

	return r;
}

/*
 * Initialise a multi-part conversion: record source/destination bios,
 * reset per-bvec offsets and apply the configured iv_offset to the
 * starting sector.  write selects encryption in crypt_convert().
 */
static void
crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
                   struct bio *bio_out, struct bio *bio_in,
                   sector_t sector, int write)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	/* either bio may be NULL here; process_write() sets bio_out later */
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	ctx->write = write;
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 *
 * Walks both bio_vec arrays in 512-byte (sector) steps, converting one
 * sector per crypt_convert_scatterlist() call and advancing the IV
 * sector each time.  Stops early on the first crypto error.
 */
static int crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
{
	int r = 0;

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
		struct scatterlist sg_in = {
			.page = bv_in->bv_page,
			.offset = bv_in->bv_offset + ctx->offset_in,
			.length = 1 << SECTOR_SHIFT
		};
		struct scatterlist sg_out = {
			.page = bv_out->bv_page,
			.offset = bv_out->bv_offset + ctx->offset_out,
			.length = 1 << SECTOR_SHIFT
		};

		/* advance input position; move to the next bvec when used up */
		ctx->offset_in += sg_in.length;
		if (ctx->offset_in >= bv_in->bv_len) {
			ctx->offset_in = 0;
			ctx->idx_in++;
		}

		/* same for the output side */
		ctx->offset_out += sg_out.length;
		if (ctx->offset_out >= bv_out->bv_len) {
			ctx->offset_out = 0;
			ctx->idx_out++;
		}

		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
		                              ctx->write, ctx->sector);
		if (r < 0)
			break;

		ctx->sector++;
	}

	return r;
}

M
Milan Broz 已提交
363 364 365 366 367 368 369 370
 static void dm_crypt_bio_destructor(struct bio *bio)
 {
	struct crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
 }

L
Linus Torvalds 已提交
371 372 373 374 375 376 377 378 379
/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *
crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
                   struct bio *base_bio, unsigned int *bio_vec_idx)
{
380
	struct bio *clone;
L
Linus Torvalds 已提交
381
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
382
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
L
Linus Torvalds 已提交
383 384
	unsigned int i;

M
Milan Broz 已提交
385 386 387 388 389 390
	if (base_bio) {
		clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
		__bio_clone(clone, base_bio);
	} else
		clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);

391
	if (!clone)
L
Linus Torvalds 已提交
392 393
		return NULL;

M
Milan Broz 已提交
394 395
	clone->bi_destructor = dm_crypt_bio_destructor;

L
Linus Torvalds 已提交
396
	/* if the last bio was not complete, continue where that one ended */
397 398 399 400
	clone->bi_idx = *bio_vec_idx;
	clone->bi_vcnt = *bio_vec_idx;
	clone->bi_size = 0;
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
L
Linus Torvalds 已提交
401

402 403
	/* clone->bi_idx pages have already been allocated */
	size -= clone->bi_idx * PAGE_SIZE;
L
Linus Torvalds 已提交
404

405 406
	for (i = clone->bi_idx; i < nr_iovecs; i++) {
		struct bio_vec *bv = bio_iovec_idx(clone, i);
L
Linus Torvalds 已提交
407 408 409 410 411 412 413 414 415 416

		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!bv->bv_page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
417
		if ((i - clone->bi_idx) == (MIN_BIO_PAGES - 1))
L
Linus Torvalds 已提交
418 419 420 421 422 423 424 425
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		bv->bv_offset = 0;
		if (size > PAGE_SIZE)
			bv->bv_len = PAGE_SIZE;
		else
			bv->bv_len = size;

426 427
		clone->bi_size += bv->bv_len;
		clone->bi_vcnt++;
L
Linus Torvalds 已提交
428 429 430
		size -= bv->bv_len;
	}

431 432
	if (!clone->bi_size) {
		bio_put(clone);
L
Linus Torvalds 已提交
433 434 435 436 437 438 439
		return NULL;
	}

	/*
	 * Remember the last bio_vec allocated to be able
	 * to correctly continue after the splitting.
	 */
440
	*bio_vec_idx = clone->bi_vcnt;
L
Linus Torvalds 已提交
441

442
	return clone;
L
Linus Torvalds 已提交
443 444 445
}

static void crypt_free_buffer_pages(struct crypt_config *cc,
446
                                    struct bio *clone, unsigned int bytes)
L
Linus Torvalds 已提交
447 448 449 450 451 452 453 454 455 456 457 458 459
{
	unsigned int i, start, end;
	struct bio_vec *bv;

	/*
	 * This is ugly, but Jens Axboe thinks that using bi_idx in the
	 * endio function is too dangerous at the moment, so I calculate the
	 * correct position using bi_vcnt and bi_size.
	 * The bv_offset and bv_len fields might already be modified but we
	 * know that we always allocated whole pages.
	 * A fix to the bi_idx issue in the kernel is in the works, so
	 * we will hopefully be able to revert to the cleaner solution soon.
	 */
460 461 462
	i = clone->bi_vcnt - 1;
	bv = bio_iovec_idx(clone, i);
	end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - clone->bi_size;
L
Linus Torvalds 已提交
463 464 465
	start = end - bytes;

	start >>= PAGE_SHIFT;
466 467
	if (!clone->bi_size)
		end = clone->bi_vcnt;
L
Linus Torvalds 已提交
468 469 470
	else
		end >>= PAGE_SHIFT;

471 472
	for (i = start; i < end; i++) {
		bv = bio_iovec_idx(clone, i);
L
Linus Torvalds 已提交
473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void dec_pending(struct crypt_io *io, int error)
{
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (error < 0)
		io->error = error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	if (io->first_clone)
		bio_put(io->first_clone);

496
	bio_endio(io->base_bio, io->base_bio->bi_size, io->error);
L
Linus Torvalds 已提交
497 498 499 500 501 502 503 504

	mempool_free(io, cc->io_pool);
}

/*
 * kcryptd:
 *
 * Needed because it would be very unwise to do decryption in an
505
 * interrupt context.
L
Linus Torvalds 已提交
506 507
 */
static struct workqueue_struct *_kcryptd_workqueue;
D
David Howells 已提交
508
static void kcryptd_do_work(struct work_struct *work);
L
Linus Torvalds 已提交
509

510
static void kcryptd_queue_io(struct crypt_io *io)
L
Linus Torvalds 已提交
511
{
D
David Howells 已提交
512
	INIT_WORK(&io->work, kcryptd_do_work);
513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528
	queue_work(_kcryptd_workqueue, &io->work);
}

/*
 * Completion handler for all clone bios.  Frees encryption pages for
 * writes as data completes; on a fully completed read, hands the io
 * back to kcryptd for decryption (post_process).
 */
static int crypt_endio(struct bio *clone, unsigned int done, int error)
{
	struct crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned read_io = bio_data_dir(clone) == READ;

	/*
	 * free the processed pages, even if
	 * it's only a partially completed write
	 */
	if (!read_io)
		crypt_free_buffer_pages(cc, clone, done);

	/* keep going - not finished yet */
	if (unlikely(clone->bi_size))
		return 1;

	if (!read_io)
		goto out;

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
		error = -EIO;
		goto out;
	}

	bio_put(clone);
	io->post_process = 1;
	kcryptd_queue_io(io);
	return 0;

out:
	bio_put(clone);
	dec_pending(io, error);
	return error;
}

/*
 * Common clone setup: route completion through crypt_endio and point
 * the clone at the underlying device, preserving the original rw flags.
 */
static void clone_init(struct crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
}

562
static void process_read(struct crypt_io *io)
563 564 565 566
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
567 568 569
	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);
570 571 572 573 574 575

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
M
Milan Broz 已提交
576
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
577 578
	if (unlikely(!clone)) {
		dec_pending(io, -ENOMEM);
579
		return;
580
	}
581 582

	clone_init(io, clone);
M
Milan Broz 已提交
583
	clone->bi_destructor = dm_crypt_bio_destructor;
584 585 586
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
587
	clone->bi_sector = cc->start + sector;
588 589 590
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

591
	generic_make_request(clone);
592 593
}

594
static void process_write(struct crypt_io *io)
595 596 597 598
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
599 600 601 602
	struct convert_context ctx;
	unsigned remaining = base_bio->bi_size;
	sector_t sector = base_bio->bi_sector - io->target->begin;
	unsigned bvec_idx = 0;
603

604
	atomic_inc(&io->pending);
605

606
	crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);
607

608 609 610 611 612 613 614
	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(cc, base_bio->bi_size,
					   io->first_clone, &bvec_idx);
615 616 617 618
		if (unlikely(!clone)) {
			dec_pending(io, -ENOMEM);
			return;
		}
619 620 621 622 623 624

		ctx.bio_out = clone;

		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
			crypt_free_buffer_pages(cc, clone, clone->bi_size);
			bio_put(clone);
625 626
			dec_pending(io, -EIO);
			return;
627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644
		}

		clone_init(io, clone);
		clone->bi_sector = cc->start + sector;

		if (!io->first_clone) {
			/*
			 * hold a reference to the first clone, because it
			 * holds the bio_vec array and that can't be freed
			 * before all other clones are released
			 */
			bio_get(clone);
			io->first_clone = clone;
		}

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

645 646 647 648
		/* prevent bio_put of first_clone */
		if (remaining)
			atomic_inc(&io->pending);

649 650 651 652
		generic_make_request(clone);

		/* out of memory -> run queues */
		if (remaining)
653
			congestion_wait(bio_data_dir(clone), HZ/100);
654
	}
655 656 657 658 659
}

static void process_read_endio(struct crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
L
Linus Torvalds 已提交
660 661
	struct convert_context ctx;

662 663
	crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
			   io->base_bio->bi_sector - io->target->begin, 0);
L
Linus Torvalds 已提交
664

665
	dec_pending(io, crypt_convert(cc, &ctx));
L
Linus Torvalds 已提交
666 667
}

D
David Howells 已提交
668
static void kcryptd_do_work(struct work_struct *work)
L
Linus Torvalds 已提交
669
{
D
David Howells 已提交
670
	struct crypt_io *io = container_of(work, struct crypt_io, work);
671

672 673 674 675 676 677
	if (io->post_process)
		process_read_endio(io);
	else if (bio_data_dir(io->base_bio) == READ)
		process_read(io);
	else
		process_write(io);
L
Linus Torvalds 已提交
678 679 680 681 682 683 684 685 686 687 688 689 690
}

/*
 * Decode key from its hex representation
 *
 * Parses 2*size hex digits from @hex into @key; returns -EINVAL on a
 * non-hex character or if extra characters follow.
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		/* endp short of &buffer[2] means a non-hex digit was seen */
		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 *
 * Writes 2*size hex digits (plus a trailing NUL from the final
 * sprintf) into @hex; the caller must provide 2*size + 1 bytes.
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

M
Milan Broz 已提交
721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745
/*
 * Decode the hex @key into cc->key and mark the key valid.
 * "-" denotes an empty (zero-length) key.  Returns 0 or -EINVAL.
 */
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	/* a replacement key must match the originally configured size */
	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}

/* Invalidate and zero the key material (used by the "key wipe" message). */
static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}

L
Linus Torvalds 已提交
746 747 748 749 750 751 752
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
753
	struct crypto_blkcipher *tfm;
L
Linus Torvalds 已提交
754 755 756 757 758 759
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
A
Andrew Morton 已提交
760
	unsigned long long tmpll;
L
Linus Torvalds 已提交
761 762

	if (argc != 5) {
763
		ti->error = "Not enough arguments";
L
Linus Torvalds 已提交
764 765 766 767 768 769 770 771 772 773
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
774
		DMWARN("Unexpected additional cipher options");
L
Linus Torvalds 已提交
775 776 777

	key_size = strlen(argv[1]) >> 1;

M
Milan Broz 已提交
778
 	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
L
Linus Torvalds 已提交
779 780
	if (cc == NULL) {
		ti->error =
781
			"Cannot allocate transparent encryption context";
L
Linus Torvalds 已提交
782 783 784
		return -ENOMEM;
	}

M
Milan Broz 已提交
785
 	if (crypt_set_key(cc, argv[1])) {
786
		ti->error = "Error decoding key";
L
Linus Torvalds 已提交
787 788 789 790 791 792 793 794 795
		goto bad1;
	}

	/* Compatiblity mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

796 797
	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
L
Linus Torvalds 已提交
798 799 800
		goto bad1;
	}

801 802 803
	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode, 
		     cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
L
Linus Torvalds 已提交
804 805 806
		goto bad1;
	}

807 808
	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
809
		ti->error = "Error allocating crypto tfm";
L
Linus Torvalds 已提交
810 811 812
		goto bad1;
	}

813 814
	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
L
Linus Torvalds 已提交
815 816 817
	cc->tfm = tfm;

	/*
818
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
L
Linus Torvalds 已提交
819 820 821 822 823 824 825 826 827
	 * See comments at iv code
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
828 829
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
L
Linus Torvalds 已提交
830
	else {
831
		ti->error = "Invalid IV mode";
L
Linus Torvalds 已提交
832 833 834 835 836 837 838
		goto bad2;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad2;

839 840
	cc->iv_size = crypto_blkcipher_ivsize(tfm);
	if (cc->iv_size)
L
Linus Torvalds 已提交
841
		/* at least a 64 bit sector number should fit in our buffer */
842
		cc->iv_size = max(cc->iv_size,
L
Linus Torvalds 已提交
843 844 845
		                  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
846
			DMWARN("Selected cipher does not support IVs");
L
Linus Torvalds 已提交
847 848 849 850 851 852
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

853
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
L
Linus Torvalds 已提交
854
	if (!cc->io_pool) {
855
		ti->error = "Cannot allocate crypt io mempool";
L
Linus Torvalds 已提交
856 857 858
		goto bad3;
	}

859
	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
L
Linus Torvalds 已提交
860
	if (!cc->page_pool) {
861
		ti->error = "Cannot allocate page mempool";
L
Linus Torvalds 已提交
862 863 864
		goto bad4;
	}

M
Milan Broz 已提交
865 866 867 868 869 870
	cc->bs = bioset_create(MIN_IOS, MIN_IOS, 4);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}

871
	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
872
		ti->error = "Error setting key";
L
Linus Torvalds 已提交
873 874 875
		goto bad5;
	}

A
Andrew Morton 已提交
876
	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
877
		ti->error = "Invalid iv_offset sector";
L
Linus Torvalds 已提交
878 879
		goto bad5;
	}
A
Andrew Morton 已提交
880
	cc->iv_offset = tmpll;
L
Linus Torvalds 已提交
881

A
Andrew Morton 已提交
882
	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
883
		ti->error = "Invalid device sector";
L
Linus Torvalds 已提交
884 885
		goto bad5;
	}
A
Andrew Morton 已提交
886
	cc->start = tmpll;
L
Linus Torvalds 已提交
887 888 889

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
	                  dm_table_get_mode(ti->table), &cc->dev)) {
890
		ti->error = "Device lookup failed";
L
Linus Torvalds 已提交
891 892 893 894 895 896 897 898
		goto bad5;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
899
			ti->error = "Error kmallocing iv_mode string";
L
Linus Torvalds 已提交
900 901 902 903 904 905 906 907 908 909
			goto bad5;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	ti->private = cc;
	return 0;

bad5:
M
Milan Broz 已提交
910 911
	bioset_free(cc->bs);
bad_bs:
L
Linus Torvalds 已提交
912 913 914 915 916 917 918
	mempool_destroy(cc->page_pool);
bad4:
	mempool_destroy(cc->io_pool);
bad3:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad2:
919
	crypto_free_blkcipher(tfm);
L
Linus Torvalds 已提交
920
bad1:
921 922
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
L
Linus Torvalds 已提交
923 924 925 926 927 928 929 930
	kfree(cc);
	return -EINVAL;
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

M
Milan Broz 已提交
931
	bioset_free(cc->bs);
L
Linus Torvalds 已提交
932 933 934
	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->io_pool);

935
	kfree(cc->iv_mode);
L
Linus Torvalds 已提交
936 937
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
938
	crypto_free_blkcipher(cc->tfm);
L
Linus Torvalds 已提交
939
	dm_put_device(ti, cc->dev);
940 941 942

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
L
Linus Torvalds 已提交
943 944 945 946 947 948
	kfree(cc);
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
949
	struct crypt_config *cc = ti->private;
M
Milan Broz 已提交
950
	struct crypt_io *io;
L
Linus Torvalds 已提交
951

M
Milan Broz 已提交
952
	io = mempool_alloc(cc->io_pool, GFP_NOIO);
L
Linus Torvalds 已提交
953
	io->target = ti;
954
	io->base_bio = bio;
L
Linus Torvalds 已提交
955
	io->first_clone = NULL;
956
	io->error = io->post_process = 0;
957
	atomic_set(&io->pending, 0);
958
	kcryptd_queue_io(io);
L
Linus Torvalds 已提交
959

960
	return 0;
L
Linus Torvalds 已提交
961 962 963 964 965 966 967 968 969 970 971 972 973 974 975
}

/*
 * Report target status: the TABLE form re-emits the constructor
 * arguments (cipher spec, hex key or "-", iv_offset, device, start).
 */
static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

M
Milan Broz 已提交
1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052
/* Mark the target suspended so key manipulation messages are allowed. */
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Refuse to resume while the key is wiped/unset; returns -EAGAIN then. */
static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

/* Clear the suspended flag; key messages are rejected again from here. */
static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 *
 * Key manipulation is only permitted while the target is suspended
 * (DM_CRYPT_SUSPENDED set by crypt_postsuspend).
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

L
Linus Torvalds 已提交
1053 1054
static struct target_type crypt_target = {
	.name   = "crypt",
1055
	.version= {1, 3, 0},
L
Linus Torvalds 已提交
1056 1057 1058 1059 1060
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
M
Milan Broz 已提交
1061 1062 1063 1064
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
L
Linus Torvalds 已提交
1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079
};

/* Module init: create the io slab, the kcryptd workqueue, and register
 * the target; unwinds in reverse order on failure. */
static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = kmem_cache_create("dm-crypt_io",
	                                   sizeof(struct crypt_io),
	                                   0, 0, NULL, NULL);
	if (!_crypt_io_pool)
		return -ENOMEM;

	_kcryptd_workqueue = create_workqueue("kcryptd");
	if (!_kcryptd_workqueue) {
		r = -ENOMEM;
		DMERR("couldn't create kcryptd");
		goto bad1;
	}

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		goto bad2;
	}

	return 0;

bad2:
	destroy_workqueue(_kcryptd_workqueue);
bad1:
	kmem_cache_destroy(_crypt_io_pool);
	return r;
}

/* Module exit: unregister the target and release global resources. */
static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR("unregister failed %d", r);

	destroy_workqueue(_kcryptd_workqueue);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");