/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"
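/*
 * Expand a string literal into the (string, size) pair expected by the
 * strnicmp() calls in crypt_message(); sizeof includes the trailing NUL.
 */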
#define MESG_STR(x) x, sizeof(x)

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};

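/*
 * Per-request context, placed after the ablkcipher_request: it points
 * back to the owning convert_context and holds the single-sector
 * scatterlists for the data being transformed.
 */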
struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

struct iv_essiv_private {
	struct crypto_cipher *tfm;
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;
	struct ablkcipher_request *req;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_ablkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	return crypto_cipher_setkey(essiv->tfm, essiv->salt,
				    crypto_hash_digestsize(essiv->hash_tfm));
}

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);

	memset(essiv->salt, 0, salt_size);

	return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_cipher(essiv->tfm);
	essiv->tfm = NULL;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	/* Allocate essiv_tfm */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		err = PTR_ERR(essiv_tfm);
		goto bad;
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		err = -EINVAL;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.tfm = essiv_tfm;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	return 0;

bad:
	if (essiv_tfm && !IS_ERR(essiv_tfm))
		crypto_free_cipher(essiv_tfm);
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);
	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count; we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

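	/* e.g. a 16-byte cipher block: log = 4, so shift = 9 - 4 = 5,
	 * i.e. 32 cipher blocks per 512-byte sector */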
	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

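/*
 * Encrypt or decrypt a single 512-byte sector from bio_in into bio_out,
 * generating the sector IV as needed and advancing both bio iterators.
 */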
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = dmreq_of_req(cc, req);
	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
			 crypto_ablkcipher_alignmask(cc->tfm) + 1);

	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);
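/*
 * Allocate cc->req from the mempool if the previous request went
 * asynchronous, then attach the tfm and the async completion callback.
 */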
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	if (!cc->req)
		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
	ablkcipher_request_set_tfm(cc->req, cc->tfm);
	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP,
					kcryptd_async_done,
					dmreq_of_req(cc, cc->req));
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	atomic_set(&ctx->pending, 1);

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->pending);

		r = crypt_convert_block(cc, ctx, cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through*/
		case -EINPROGRESS:
			cc->req = NULL;
			ctx->sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->pending);
			ctx->sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->pending);
			return r;
		}
	}

	return 0;
}

static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static void kcryptd_io_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	crypt_inc_pending(io);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		io->error = -ENOMEM;
		crypt_dec_pending(io);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_io_read(io);
	else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

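/*
 * Submit the encrypted clone to the block layer: directly on the
 * synchronous path, or via the io workqueue when called from the async
 * completion handler.  On error the clone's pages are freed instead.
 */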
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
					  int error, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

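/*
 * Write path: allocate bounce pages, encrypt base_bio into them and
 * submit the result.  Large bios may be processed as several fragments,
 * each with its own dm_crypt_io chained to the first via base_io.
 */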
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);
		r = crypt_convert(cc, &io->ctx);
		crypt_finished = atomic_dec_and_test(&io->ctx.pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, r, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->target, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	if (atomic_dec_and_test(&io->ctx.pending))
		kcryptd_crypt_read_done(io, r);

	crypt_dec_pending(io);
}

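/*
 * Completion callback for asynchronous cipher requests.  -EINPROGRESS
 * here only signals that a previously backlogged request has been
 * accepted, so the submitter waiting in crypt_convert() is restarted.
 */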
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io, error);
	else
		kcryptd_crypt_write_io_submit(io, error, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

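/*
 * Decode the hex-encoded key from the mapping table ("-" denotes an
 * empty key) and program it into the cipher.
 */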
static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	   (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
}

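/*
 * Destructor; also used by crypt_ctr() to unwind a partially
 * constructed context, so every field is checked before being freed.
 */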
static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->tfm && !IS_ERR(cc->tfm))
		crypto_free_ablkcipher(cc->tfm);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kfree(cc->iv_mode);

	/* Must zero key material before freeing */
	kzfree(cc);
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;
	int ret = -EINVAL;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

 	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	ti->private = cc;

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad;
	}

	ret = -ENOMEM;
	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad;
	}

	cc->tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0);
	if (IS_ERR(cc->tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);

	ret = crypt_set_key(cc, argv[1]);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
	 * See comments at iv code
	 */
	ret = -EINVAL;
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	cc->iv_size = crypto_ablkcipher_ivsize(cc->tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
			DMWARN("Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

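	/*
	 * Work out where dm_crypt_request lives inside each crypto request:
	 * after the ablkcipher_request and its transform context, rounded
	 * up to the crypto layer's alignment (see the layout comment in
	 * struct crypt_config).
	 */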
	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(cc->tfm);
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(cc->tfm) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}
	cc->req = NULL;

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	ret = -ENOMEM;
	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kstrdup(ivmode, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad;
		}
	} else
		cc->iv_mode = NULL;

	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_requests = 1;
	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc;

	if (unlikely(bio_empty_barrier(bio))) {
		cc = ti->private;
		bio->bi_bdev = cc->dev->bdev;
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

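/*
 * Restrict bio merging to what the underlying device permits at the
 * remapped sector.
 */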
static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 7, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");