// SPDX-License-Identifier: GPL-2.0-only
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
	queue_work(fscrypt_read_workqueue, work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
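
/*
 * Example (illustrative sketch only; fs_read_ctx, fs_read_end_io, and
 * fs_decrypt_work are hypothetical filesystem-side names): a bio completion
 * handler runs in atomic context, so it defers decryption to this queue:
 *
 *	static void fs_read_end_io(struct bio *bio)
 *	{
 *		struct fs_read_ctx *ctx = bio->bi_private;
 *
 *		INIT_WORK(&ctx->work, fs_decrypt_work);
 *		fscrypt_enqueue_decrypt_work(&ctx->work);
 *	}
 */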

/**
 * fscrypt_release_ctx() - Release a decryption context
 * @ctx: The decryption context to release.
 *
 * If the decryption context was allocated from the pre-allocated pool, return
 * it to that pool.  Else, free it.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Get a decryption context
 * @gfp_flags:   The gfp flag for memory allocation
 *
 * Allocate and initialize a decryption context.
 *
 * Return: A new decryption context on success; an ERR_PTR() otherwise.
 */
struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	unsigned long flags;

	/*
	 * First try getting a ctx from the free list so that we don't have to
	 * call into the slab allocator.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);
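
/*
 * Example (sketch of a hypothetical caller): a context obtained here must
 * eventually be handed back via fscrypt_release_ctx():
 *
 *	struct fscrypt_ctx *ctx = fscrypt_get_ctx(GFP_NOFS);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...use ctx to track the decryption work...
 *	fscrypt_release_ctx(ctx);
 */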

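/* Allocate a ciphertext bounce page from the mempool */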
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
	return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
}

/**
 * fscrypt_free_bounce_page() - free a ciphertext bounce page
 * @bounce_page: the bounce page to free, or NULL
 *
 * Free a bounce page that was allocated by fscrypt_encrypt_page(), or by
 * fscrypt_alloc_bounce_page() directly.
 */
void fscrypt_free_bounce_page(struct page *bounce_page)
{
	if (!bounce_page)
		return;
	set_page_private(bounce_page, (unsigned long)NULL);
	ClearPagePrivate(bounce_page);
	mempool_free(bounce_page, fscrypt_bounce_page_pool);
}
EXPORT_SYMBOL(fscrypt_free_bounce_page);

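/*
 * fscrypt_generate_iv() - initialize the IV for one logical block of a file.
 *
 * Worked example: with AES-256-XTS and neither the DIRECT_KEY policy flag
 * nor ESSIV in use, the 16-byte IV for logical block 5 is just the block
 * number in little-endian order, zero-padded:
 *
 *	05 00 00 00 00 00 00 00  00 00 00 00 00 00 00 00
 */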
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
			 const struct fscrypt_info *ci)
{
	memset(iv, 0, ci->ci_mode->ivsize);
	iv->lblk_num = cpu_to_le64(lblk_num);

	if (ci->ci_flags & FS_POLICY_FLAG_DIRECT_KEY)
		memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE);

	if (ci->ci_essiv_tfm != NULL)
		crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
}

int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
			   u64 lblk_num, struct page *src_page,
			   struct page *dest_page, unsigned int len,
			   unsigned int offs, gfp_t gfp_flags)
{
	union fscrypt_iv iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	BUG_ON(len == 0);

	fscrypt_generate_iv(&iv, lblk_num, ci);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		fscrypt_err(inode->i_sb,
			    "%scryption failed for inode %lu, block %llu: %d",
			    (rw == FS_DECRYPT ? "de" : "en"),
			    inode->i_ino, lblk_num, res);
		return res;
	}
	return 0;
}
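
/*
 * Example (sketch): a filesystem with a 1024-byte block size could decrypt
 * the second block of a page in-place with:
 *
 *	err = fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
 *				     1024, 1024, GFP_NOFS);
 */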

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode:     The inode for which the encryption should take place
 * @page:      The page to encrypt. Must be locked for bounce-page
 *             encryption.
 * @len:       Length of data to encrypt in @page and encrypted
 *             data in returned page.
 * @offs:      Offset of data within @page and returned
 *             page holding encrypted data.
 * @lblk_num:  Logical block number. This must be unique for multiple
 *             calls with same inode, except when overwriting
 *             previously written data.
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Encrypts @page.  If the filesystem set FS_CFLG_OWN_PAGES, then the data is
 * encrypted in-place and @page is returned.  Else, a bounce page is allocated,
 * the data is encrypted into the bounce page, and the bounce page is returned.
 * The caller is responsible for calling fscrypt_free_bounce_page().
 *
 * Return: A page containing the encrypted data on success, else an ERR_PTR()
 */
struct page *fscrypt_encrypt_page(const struct inode *inode,
				struct page *page,
				unsigned int len,
				unsigned int offs,
				u64 lblk_num, gfp_t gfp_flags)
{
	struct page *ciphertext_page = page;
	int err;

	BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);

	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
		/* with in-place encryption we just encrypt the page */
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
					     ciphertext_page, len, offs,
					     gfp_flags);
		if (err)
			return ERR_PTR(err);

		return ciphertext_page;
	}

	BUG_ON(!PageLocked(page));

	/* The encryption operation will require a bounce page. */
	ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
	if (!ciphertext_page)
		return ERR_PTR(-ENOMEM);

	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
				     page, ciphertext_page, len, offs,
				     gfp_flags);
	if (err) {
		fscrypt_free_bounce_page(ciphertext_page);
		return ERR_PTR(err);
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)page);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
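
/*
 * Example (illustrative sketch, not taken from a real filesystem; assumes
 * the block size equals PAGE_SIZE so that page->index is the logical block
 * number): a writeback path without FS_CFLG_OWN_PAGES encrypts into a
 * bounce page, submits that page for I/O, and frees it on completion:
 *
 *	struct page *bounce_page;
 *
 *	bounce_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
 *					   page->index, GFP_NOFS);
 *	if (IS_ERR(bounce_page))
 *		return PTR_ERR(bounce_page);
 *	...submit bounce_page for I/O; once the write completes...
 *	fscrypt_free_bounce_page(bounce_page);
 */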

/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @inode:     The corresponding inode for the page to decrypt.
 * @page:      The page to decrypt. Must be locked in case
 *             it is a writeback page (FS_CFLG_OWN_PAGES unset).
 * @len:       Number of bytes in @page to be decrypted.
 * @offs:      Start of data in @page.
 * @lblk_num:  Logical block number.
 *
 * Decrypts @page in-place using the inode's encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
			unsigned int len, unsigned int offs, u64 lblk_num)
{
	if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
		BUG_ON(!PageLocked(page));

	return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
				      len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
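
/*
 * Example (sketch, again assuming block size == PAGE_SIZE): a deferred
 * read-completion work function might decrypt each page of a bio in-place:
 *
 *	bio_for_each_segment_all(bv, bio, i) {
 *		if (fscrypt_decrypt_page(inode, bv->bv_page, PAGE_SIZE, 0,
 *					 bv->bv_page->index))
 *			SetPageError(bv->bv_page);
 *	}
 */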

/*
 * Validate dentries in encrypted directories to make sure we aren't potentially
 * caching stale dentries after a key has been added.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int err;
	int valid;

	/*
	 * Plaintext names are always valid, since fscrypt doesn't support
	 * reverting to ciphertext names without evicting the directory's inode
	 * -- which implies eviction of the dentries in the directory.
	 */
	if (!(dentry->d_flags & DCACHE_ENCRYPTED_NAME))
		return 1;

	/*
	 * Ciphertext name; valid if the directory's key is still unavailable.
	 *
	 * Although fscrypt forbids rename() on ciphertext names, we still must
	 * use dget_parent() here rather than use ->d_parent directly.  That's
	 * because a corrupted fs image may contain directory hard links, which
	 * the VFS handles by moving the directory's dentry tree in the dcache
	 * each time ->lookup() finds the directory and it already has a dentry
	 * elsewhere.  Thus ->d_parent can be changing, and we must safely grab
	 * a reference to some ->d_parent to prevent it from being freed.
	 */

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	err = fscrypt_get_encryption_info(d_inode(dir));
	valid = !fscrypt_has_encryption_key(d_inode(dir));
	dput(dir);

	if (err < 0)
		return err;

	return valid;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
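
/*
 * Example (sketch): these ops get installed on a dentry whose name is a
 * ciphertext (no-key) name, typically from the lookup hook, e.g.:
 *
 *	d_set_d_op(dentry, &fscrypt_d_ops);
 */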

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @cop_flags:  fscrypt operations flags
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(unsigned int cop_flags)
{
	int i, res = -ENOMEM;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (cop_flags & FS_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}

void fscrypt_msg(struct super_block *sb, const char *level,
		 const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct va_format vaf;
	va_list args;

	if (!__ratelimit(&rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (sb)
		printk("%sfscrypt (%s): %pV\n", level, sb->s_id, &vaf);
	else
		printk("%sfscrypt: %pV\n", level, &vaf);
	va_end(args);
}
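
/*
 * Callers normally use the fscrypt_warn() and fscrypt_err() wrapper macros
 * from fscrypt_private.h rather than calling this directly, e.g.:
 *
 *	fscrypt_warn(inode->i_sb, "inode %lu is corrupted", inode->i_ino);
 */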

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	/*
	 * Use an unbound workqueue to allow bios to be decrypted in parallel
	 * even when they happen to complete on the same CPU.  This sacrifices
	 * locality, but it's worthwhile since decryption is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize decryption work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);

	fscrypt_essiv_cleanup();
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");