Commit 6162e4b0 authored by Linus Torvalds

Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:
 "A few bug fixes and add support for file-system level encryption in
  ext4"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (31 commits)
  ext4 crypto: enable encryption feature flag
  ext4 crypto: add symlink encryption
  ext4 crypto: enable filename encryption
  ext4 crypto: filename encryption modifications
  ext4 crypto: partial update to namei.c for fname crypto
  ext4 crypto: insert encrypted filenames into a leaf directory block
  ext4 crypto: teach ext4_htree_store_dirent() to store decrypted filenames
  ext4 crypto: filename encryption facilities
  ext4 crypto: implement the ext4 decryption read path
  ext4 crypto: implement the ext4 encryption write path
  ext4 crypto: inherit encryption policies on inode and directory create
  ext4 crypto: enforce context consistency
  ext4 crypto: add encryption key management facilities
  ext4 crypto: add ext4 encryption facilities
  ext4 crypto: add encryption policy and password salt support
  ext4 crypto: add encryption xattr support
  ext4 crypto: export ext4_empty_dir()
  ext4 crypto: add ext4 encryption Kconfig
  ext4 crypto: reserve codepoints used by the ext4 encryption feature
  ext4 crypto: add ext4_mpage_readpages()
  ...
@@ -64,6 +64,23 @@ config EXT4_FS_SECURITY
If you are not using a security module that requires using
extended attributes for file security labels, say N.
config EXT4_FS_ENCRYPTION
bool "Ext4 Encryption"
depends on EXT4_FS
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_ECB
select CRYPTO_XTS
select CRYPTO_CTS
select CRYPTO_SHA256
select KEYS
select ENCRYPTED_KEYS
help
Enable encryption of ext4 files and directories. This
feature is similar to ecryptfs, but it is more memory
efficient since it avoids caching the encrypted and
decrypted pages in the page cache.
config EXT4_DEBUG
bool "EXT4 debugging support"
depends on EXT4_FS
......
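To illustrate how the new feature is exercised from userspace, here is a minimal hypothetical sketch (not part of this commit) that sets an encryption policy on an empty directory via the EXT4_IOC_SET_ENCRYPTION_POLICY ioctl introduced later in this series; the mount point and key-descriptor bytes are made-up placeholders:

/* Hypothetical userspace example: turn on encryption for an empty directory.
 * The ioctl number and policy layout mirror the definitions in this patch set. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define EXT4_KEY_DESCRIPTOR_SIZE 8
#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1
#define EXT4_ENCRYPTION_MODE_AES_256_CTS 4

struct ext4_encryption_policy {
	char version;
	char contents_encryption_mode;
	char filenames_encryption_mode;
	char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
} __attribute__((__packed__));

#define EXT4_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct ext4_encryption_policy)

int main(void)
{
	struct ext4_encryption_policy policy;
	int fd = open("/mnt/secret", O_RDONLY);	/* must be an empty directory */

	if (fd < 0)
		return 1;
	policy.version = 0;	/* ext4_process_policy() only accepts version 0 */
	policy.contents_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS;
	policy.filenames_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_CTS;
	memcpy(policy.master_key_descriptor, "\x01\x23\x45\x67\x89\xab\xcd\xef",
	       EXT4_KEY_DESCRIPTOR_SIZE);
	if (ioctl(fd, EXT4_IOC_SET_ENCRYPTION_POLICY, &policy) < 0)
		perror("EXT4_IOC_SET_ENCRYPTION_POLICY");
	close(fd);
	return 0;
}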
@@ -8,7 +8,9 @@ ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
xattr_trusted.o inline.o
xattr_trusted.o inline.o readpage.o
ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o
ext4-$(CONFIG_EXT4_FS_ENCRYPTION) += crypto_policy.o crypto.o \
crypto_key.o crypto_fname.o
@@ -4,11 +4,6 @@
* Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
......
@@ -14,7 +14,6 @@
#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
@@ -641,8 +640,6 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
* fail EDQUOT for metadata, but we do account for it.
*/
if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
dquot_alloc_block_nofail(inode,
EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
}
......
@@ -8,7 +8,6 @@
*/
#include <linux/buffer_head.h>
#include <linux/jbd2.h>
#include "ext4.h"
unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
......
@@ -16,7 +16,6 @@
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include "ext4.h"
......
/*
* linux/fs/ext4/crypto.c
*
* Copyright (C) 2015, Google, Inc.
*
* This contains encryption functions for ext4
*
* Written by Michael Halcrow, 2014.
*
* Filename encryption additions
* Uday Savagaonkar, 2014
* Encryption policy handling additions
* Ildar Muslukhov, 2014
*
* This has not yet undergone a rigorous security audit.
*
* The usage of AES-XTS should conform to recommendations in NIST
* Special Publication 800-38E and IEEE P1619/D16.
*/
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/crypto.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>
#include "ext4_extents.h"
#include "xattr.h"
/* Encryption added and removed here! (L: */
static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;
module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
"Number of crypto contexts to preallocate");
static mempool_t *ext4_bounce_page_pool;
static LIST_HEAD(ext4_free_crypto_ctxs);
static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);
/**
* ext4_release_crypto_ctx() - Releases an encryption context
* @ctx: The encryption context to release.
*
* If the encryption context was allocated from the pre-allocated pool, returns
* it to that pool. Else, frees it.
*
* If there's a bounce page in the context, this frees that.
*/
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
{
unsigned long flags;
if (ctx->bounce_page) {
if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL)
__free_page(ctx->bounce_page);
else
mempool_free(ctx->bounce_page, ext4_bounce_page_pool);
ctx->bounce_page = NULL;
}
ctx->control_page = NULL;
if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
if (ctx->tfm)
crypto_free_tfm(ctx->tfm);
kfree(ctx);
} else {
spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
}
}
/**
* ext4_alloc_and_init_crypto_ctx() - Allocates and inits an encryption context
* @mask: The allocation mask.
*
* Return: An allocated and initialized encryption context on success. An error
* value or NULL otherwise.
*/
static struct ext4_crypto_ctx *ext4_alloc_and_init_crypto_ctx(gfp_t mask)
{
struct ext4_crypto_ctx *ctx = kzalloc(sizeof(struct ext4_crypto_ctx),
mask);
if (!ctx)
return ERR_PTR(-ENOMEM);
return ctx;
}
/**
* ext4_get_crypto_ctx() - Gets an encryption context
* @inode: The inode for which we are doing the crypto
*
* Allocates and initializes an encryption context.
*
* Return: An allocated and initialized encryption context on success; error
* value or NULL otherwise.
*/
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
{
struct ext4_crypto_ctx *ctx = NULL;
int res = 0;
unsigned long flags;
struct ext4_encryption_key *key = &EXT4_I(inode)->i_encryption_key;
if (!ext4_read_workqueue)
ext4_init_crypto();
/*
* We first try getting the ctx from a free list because in
* the common case the ctx will have an allocated and
* initialized crypto tfm, so it's probably a worthwhile
* optimization. For the bounce page, we first try getting it
* from the kernel allocator because that's just about as fast
* as getting it from a list and because a cache of free pages
* should generally be a "last resort" option for a filesystem
* to be able to do its job.
*/
spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
struct ext4_crypto_ctx, free_list);
if (ctx)
list_del(&ctx->free_list);
spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
if (!ctx) {
ctx = ext4_alloc_and_init_crypto_ctx(GFP_NOFS);
if (IS_ERR(ctx)) {
res = PTR_ERR(ctx);
goto out;
}
ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
} else {
ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
}
/* Allocate a new Crypto API context if we don't already have
* one or if it isn't the right mode. */
BUG_ON(key->mode == EXT4_ENCRYPTION_MODE_INVALID);
if (ctx->tfm && (ctx->mode != key->mode)) {
crypto_free_tfm(ctx->tfm);
ctx->tfm = NULL;
ctx->mode = EXT4_ENCRYPTION_MODE_INVALID;
}
if (!ctx->tfm) {
switch (key->mode) {
case EXT4_ENCRYPTION_MODE_AES_256_XTS:
ctx->tfm = crypto_ablkcipher_tfm(
crypto_alloc_ablkcipher("xts(aes)", 0, 0));
break;
case EXT4_ENCRYPTION_MODE_AES_256_GCM:
/* TODO(mhalcrow): AEAD w/ gcm(aes);
* crypto_aead_setauthsize() */
ctx->tfm = ERR_PTR(-ENOTSUPP);
break;
default:
BUG();
}
if (IS_ERR_OR_NULL(ctx->tfm)) {
res = PTR_ERR(ctx->tfm);
ctx->tfm = NULL;
goto out;
}
ctx->mode = key->mode;
}
BUG_ON(key->size != ext4_encryption_key_size(key->mode));
/* There shouldn't be a bounce page attached to the crypto
* context at this point. */
BUG_ON(ctx->bounce_page);
out:
if (res) {
if (!IS_ERR_OR_NULL(ctx))
ext4_release_crypto_ctx(ctx);
ctx = ERR_PTR(res);
}
return ctx;
}
struct workqueue_struct *ext4_read_workqueue;
static DEFINE_MUTEX(crypto_init);
/**
* ext4_exit_crypto() - Shutdown the ext4 encryption system
*/
void ext4_exit_crypto(void)
{
struct ext4_crypto_ctx *pos, *n;
list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list) {
if (pos->bounce_page) {
if (pos->flags &
EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) {
__free_page(pos->bounce_page);
} else {
mempool_free(pos->bounce_page,
ext4_bounce_page_pool);
}
}
if (pos->tfm)
crypto_free_tfm(pos->tfm);
kfree(pos);
}
INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
if (ext4_bounce_page_pool)
mempool_destroy(ext4_bounce_page_pool);
ext4_bounce_page_pool = NULL;
if (ext4_read_workqueue)
destroy_workqueue(ext4_read_workqueue);
ext4_read_workqueue = NULL;
}
/**
* ext4_init_crypto() - Set up for ext4 encryption.
*
* We only call this when we start accessing encrypted files, since it
* results in memory getting allocated that wouldn't otherwise be used.
*
* Return: Zero on success, non-zero otherwise.
*/
int ext4_init_crypto(void)
{
int i, res;
mutex_lock(&crypto_init);
if (ext4_read_workqueue)
goto already_initialized;
ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
if (!ext4_read_workqueue) {
res = -ENOMEM;
goto fail;
}
for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
struct ext4_crypto_ctx *ctx;
ctx = ext4_alloc_and_init_crypto_ctx(GFP_KERNEL);
if (IS_ERR(ctx)) {
res = PTR_ERR(ctx);
goto fail;
}
list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
}
ext4_bounce_page_pool =
mempool_create_page_pool(num_prealloc_crypto_pages, 0);
if (!ext4_bounce_page_pool) {
res = -ENOMEM;
goto fail;
}
already_initialized:
mutex_unlock(&crypto_init);
return 0;
fail:
ext4_exit_crypto();
mutex_unlock(&crypto_init);
return res;
}
void ext4_restore_control_page(struct page *data_page)
{
struct ext4_crypto_ctx *ctx =
(struct ext4_crypto_ctx *)page_private(data_page);
set_page_private(data_page, (unsigned long)NULL);
ClearPagePrivate(data_page);
unlock_page(data_page);
ext4_release_crypto_ctx(ctx);
}
/**
* ext4_crypt_complete() - The completion callback for page encryption
* @req: The asynchronous encryption request context
* @res: The result of the encryption operation
*/
static void ext4_crypt_complete(struct crypto_async_request *req, int res)
{
struct ext4_completion_result *ecr = req->data;
if (res == -EINPROGRESS)
return;
ecr->res = res;
complete(&ecr->completion);
}
typedef enum {
EXT4_DECRYPT = 0,
EXT4_ENCRYPT,
} ext4_direction_t;
static int ext4_page_crypto(struct ext4_crypto_ctx *ctx,
struct inode *inode,
ext4_direction_t rw,
pgoff_t index,
struct page *src_page,
struct page *dest_page)
{
u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
struct ablkcipher_request *req = NULL;
DECLARE_EXT4_COMPLETION_RESULT(ecr);
struct scatterlist dst, src;
struct ext4_inode_info *ei = EXT4_I(inode);
struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm);
int res = 0;
BUG_ON(!ctx->tfm);
BUG_ON(ctx->mode != ei->i_encryption_key.mode);
if (ctx->mode != EXT4_ENCRYPTION_MODE_AES_256_XTS) {
printk_ratelimited(KERN_ERR
"%s: unsupported crypto algorithm: %d\n",
__func__, ctx->mode);
return -ENOTSUPP;
}
crypto_ablkcipher_clear_flags(atfm, ~0);
crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
res = crypto_ablkcipher_setkey(atfm, ei->i_encryption_key.raw,
ei->i_encryption_key.size);
if (res) {
printk_ratelimited(KERN_ERR
"%s: crypto_ablkcipher_setkey() failed\n",
__func__);
return res;
}
req = ablkcipher_request_alloc(atfm, GFP_NOFS);
if (!req) {
printk_ratelimited(KERN_ERR
"%s: crypto_request_alloc() failed\n",
__func__);
return -ENOMEM;
}
ablkcipher_request_set_callback(
req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
ext4_crypt_complete, &ecr);
BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
memcpy(xts_tweak, &index, sizeof(index));
memset(&xts_tweak[sizeof(index)], 0,
EXT4_XTS_TWEAK_SIZE - sizeof(index));
sg_init_table(&dst, 1);
sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
sg_init_table(&src, 1);
sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
xts_tweak);
if (rw == EXT4_DECRYPT)
res = crypto_ablkcipher_decrypt(req);
else
res = crypto_ablkcipher_encrypt(req);
if (res == -EINPROGRESS || res == -EBUSY) {
BUG_ON(req->base.data != &ecr);
wait_for_completion(&ecr.completion);
res = ecr.res;
}
ablkcipher_request_free(req);
if (res) {
printk_ratelimited(
KERN_ERR
"%s: crypto_ablkcipher_encrypt() returned %d\n",
__func__, res);
return res;
}
return 0;
}
/**
* ext4_encrypt() - Encrypts a page
* @inode: The inode for which the encryption should take place
* @plaintext_page: The page to encrypt. Must be locked.
*
* Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
* encryption context.
*
* Called on the page write path. The caller must call
* ext4_restore_control_page() on the returned ciphertext page to
* release the bounce buffer and the encryption context.
*
* Return: An allocated page with the encrypted content on success. Else, an
* error value or NULL.
*/
struct page *ext4_encrypt(struct inode *inode,
struct page *plaintext_page)
{
struct ext4_crypto_ctx *ctx;
struct page *ciphertext_page = NULL;
int err;
BUG_ON(!PageLocked(plaintext_page));
ctx = ext4_get_crypto_ctx(inode);
if (IS_ERR(ctx))
return (struct page *) ctx;
/* The encryption operation will require a bounce page. */
ciphertext_page = alloc_page(GFP_NOFS);
if (!ciphertext_page) {
/* This is a potential bottleneck, but at least we'll have
* forward progress. */
ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
GFP_NOFS);
if (WARN_ON_ONCE(!ciphertext_page)) {
ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
GFP_NOFS | __GFP_WAIT);
}
ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
} else {
ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
}
ctx->bounce_page = ciphertext_page;
ctx->control_page = plaintext_page;
err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index,
plaintext_page, ciphertext_page);
if (err) {
ext4_release_crypto_ctx(ctx);
return ERR_PTR(err);
}
SetPagePrivate(ciphertext_page);
set_page_private(ciphertext_page, (unsigned long)ctx);
lock_page(ciphertext_page);
return ciphertext_page;
}
/**
* ext4_decrypt() - Decrypts a page in-place
* @ctx: The encryption context.
* @page: The page to decrypt. Must be locked.
*
* Decrypts page in-place using the ctx encryption context.
*
* Called from the read completion callback.
*
* Return: Zero on success, non-zero otherwise.
*/
int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page)
{
BUG_ON(!PageLocked(page));
return ext4_page_crypto(ctx, page->mapping->host,
EXT4_DECRYPT, page->index, page, page);
}
/*
* Convenience function which takes care of allocating and
* deallocating the encryption context
*/
int ext4_decrypt_one(struct inode *inode, struct page *page)
{
int ret;
struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ret = ext4_decrypt(ctx, page);
ext4_release_crypto_ctx(ctx);
return ret;
}
int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
{
struct ext4_crypto_ctx *ctx;
struct page *ciphertext_page = NULL;
struct bio *bio;
ext4_lblk_t lblk = ex->ee_block;
ext4_fsblk_t pblk = ext4_ext_pblock(ex);
unsigned int len = ext4_ext_get_actual_len(ex);
int err = 0;
BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);
ctx = ext4_get_crypto_ctx(inode);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ciphertext_page = alloc_page(GFP_NOFS);
if (!ciphertext_page) {
/* This is a potential bottleneck, but at least we'll have
* forward progress. */
ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
GFP_NOFS);
if (WARN_ON_ONCE(!ciphertext_page)) {
ciphertext_page = mempool_alloc(ext4_bounce_page_pool,
GFP_NOFS | __GFP_WAIT);
}
ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
} else {
ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL;
}
ctx->bounce_page = ciphertext_page;
while (len--) {
err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk,
ZERO_PAGE(0), ciphertext_page);
if (err)
goto errout;
bio = bio_alloc(GFP_KERNEL, 1);
if (!bio) {
err = -ENOMEM;
goto errout;
}
bio->bi_bdev = inode->i_sb->s_bdev;
bio->bi_iter.bi_sector = pblk;
err = bio_add_page(bio, ciphertext_page,
inode->i_sb->s_blocksize, 0);
if (err) {
bio_put(bio);
goto errout;
}
err = submit_bio_wait(WRITE, bio);
if (err)
goto errout;
}
err = 0;
errout:
ext4_release_crypto_ctx(ctx);
return err;
}
bool ext4_valid_contents_enc_mode(uint32_t mode)
{
return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
}
/**
* ext4_validate_encryption_key_size() - Validate the encryption key size
* @mode: The key mode.
* @size: The key size to validate.
*
* Return: The validated key size for @mode. Zero if invalid.
*/
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
if (size == ext4_encryption_key_size(mode))
return size;
return 0;
}
/*
* linux/fs/ext4/crypto_fname.c
*
* Copyright (C) 2015, Google, Inc.
*
* This contains functions for filename crypto management in ext4
*
* Written by Uday Savagaonkar, 2014.
*
* This has not yet undergone a rigorous security audit.
*
*/
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/encrypted-type.h>
#include <keys/user-type.h>
#include <linux/crypto.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>
#include "ext4.h"
#include "ext4_crypto.h"
#include "xattr.h"
/**
* ext4_dir_crypt_complete() -
*/
static void ext4_dir_crypt_complete(struct crypto_async_request *req, int res)
{
struct ext4_completion_result *ecr = req->data;
if (res == -EINPROGRESS)
return;
ecr->res = res;
complete(&ecr->completion);
}
bool ext4_valid_filenames_enc_mode(uint32_t mode)
{
return (mode == EXT4_ENCRYPTION_MODE_AES_256_CTS);
}
/**
* ext4_fname_encrypt() -
*
* This function encrypts the input filename, and returns the length of the
* ciphertext. Errors are returned as negative numbers. We trust the caller to
* allocate sufficient memory to oname string.
*/
static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx,
const struct qstr *iname,
struct ext4_str *oname)
{
u32 ciphertext_len;
struct ablkcipher_request *req = NULL;
DECLARE_EXT4_COMPLETION_RESULT(ecr);
struct crypto_ablkcipher *tfm = ctx->ctfm;
int res = 0;
char iv[EXT4_CRYPTO_BLOCK_SIZE];
struct scatterlist sg[1];
char *workbuf;
if (iname->len <= 0 || iname->len > ctx->lim)
return -EIO;
ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ?
EXT4_CRYPTO_BLOCK_SIZE : iname->len;
ciphertext_len = (ciphertext_len > ctx->lim)
? ctx->lim : ciphertext_len;
/* Allocate request */
req = ablkcipher_request_alloc(tfm, GFP_NOFS);
if (!req) {
printk_ratelimited(
KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
return -ENOMEM;
}
ablkcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
ext4_dir_crypt_complete, &ecr);
/* Map the workpage */
workbuf = kmap(ctx->workpage);
/* Copy the input */
memcpy(workbuf, iname->name, iname->len);
if (iname->len < ciphertext_len)
memset(workbuf + iname->len, 0, ciphertext_len - iname->len);
/* Initialize IV */
memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);
/* Create encryption request */
sg_init_table(sg, 1);
sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0);
ablkcipher_request_set_crypt(req, sg, sg, iname->len, iv);
res = crypto_ablkcipher_encrypt(req);
if (res == -EINPROGRESS || res == -EBUSY) {
BUG_ON(req->base.data != &ecr);
wait_for_completion(&ecr.completion);
res = ecr.res;
}
if (res >= 0) {
/* Copy the result to output */
memcpy(oname->name, workbuf, ciphertext_len);
res = ciphertext_len;
}
kunmap(ctx->workpage);
ablkcipher_request_free(req);
if (res < 0) {
printk_ratelimited(
KERN_ERR "%s: Error (error code %d)\n", __func__, res);
}
oname->len = ciphertext_len;
return res;
}
/*
* ext4_fname_decrypt()
* This function decrypts the input filename, and returns
* the length of the plaintext.
* Errors are returned as negative numbers.
* We trust the caller to allocate sufficient memory to oname string.
*/
static int ext4_fname_decrypt(struct ext4_fname_crypto_ctx *ctx,
const struct ext4_str *iname,
struct ext4_str *oname)
{
struct ext4_str tmp_in[2], tmp_out[1];
struct ablkcipher_request *req = NULL;
DECLARE_EXT4_COMPLETION_RESULT(ecr);
struct scatterlist sg[1];
struct crypto_ablkcipher *tfm = ctx->ctfm;
int res = 0;
char iv[EXT4_CRYPTO_BLOCK_SIZE];
char *workbuf;
if (iname->len <= 0 || iname->len > ctx->lim)
return -EIO;
tmp_in[0].name = iname->name;
tmp_in[0].len = iname->len;
tmp_out[0].name = oname->name;
/* Allocate request */
req = ablkcipher_request_alloc(tfm, GFP_NOFS);
if (!req) {
printk_ratelimited(
KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
return -ENOMEM;
}
ablkcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
ext4_dir_crypt_complete, &ecr);
/* Map the workpage */
workbuf = kmap(ctx->workpage);
/* Copy the input */
memcpy(workbuf, iname->name, iname->len);
/* Initialize IV */
memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);
/* Create encryption request */
sg_init_table(sg, 1);
sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0);
ablkcipher_request_set_crypt(req, sg, sg, iname->len, iv);
res = crypto_ablkcipher_decrypt(req);
if (res == -EINPROGRESS || res == -EBUSY) {
BUG_ON(req->base.data != &ecr);
wait_for_completion(&ecr.completion);
res = ecr.res;
}
if (res >= 0) {
/* Copy the result to output */
memcpy(oname->name, workbuf, iname->len);
res = iname->len;
}
kunmap(ctx->workpage);
ablkcipher_request_free(req);
if (res < 0) {
printk_ratelimited(
KERN_ERR "%s: Error in ext4_fname_encrypt (error code %d)\n",
__func__, res);
return res;
}
oname->len = strnlen(oname->name, iname->len);
return oname->len;
}
/**
* ext4_fname_encode_digest() -
*
* Encodes the input digest using characters from the set [a-zA-Z0-9_+].
* The encoded string is roughly 4/3 times the size of the input string.
*/
int ext4_fname_encode_digest(char *dst, char *src, u32 len)
{
static const char *lookup_table =
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+";
u32 current_chunk, num_chunks, i;
char tmp_buf[3];
u32 c0, c1, c2, c3;
current_chunk = 0;
num_chunks = len/3;
for (i = 0; i < num_chunks; i++) {
c0 = src[3*i] & 0x3f;
c1 = (((src[3*i]>>6)&0x3) | ((src[3*i+1] & 0xf)<<2)) & 0x3f;
c2 = (((src[3*i+1]>>4)&0xf) | ((src[3*i+2] & 0x3)<<4)) & 0x3f;
c3 = (src[3*i+2]>>2) & 0x3f;
dst[4*i] = lookup_table[c0];
dst[4*i+1] = lookup_table[c1];
dst[4*i+2] = lookup_table[c2];
dst[4*i+3] = lookup_table[c3];
}
if (i*3 < len) {
memset(tmp_buf, 0, 3);
memcpy(tmp_buf, &src[3*i], len-3*i);
c0 = tmp_buf[0] & 0x3f;
c1 = (((tmp_buf[0]>>6)&0x3) | ((tmp_buf[1] & 0xf)<<2)) & 0x3f;
c2 = (((tmp_buf[1]>>4)&0xf) | ((tmp_buf[2] & 0x3)<<4)) & 0x3f;
c3 = (tmp_buf[2]>>2) & 0x3f;
dst[4*i] = lookup_table[c0];
dst[4*i+1] = lookup_table[c1];
dst[4*i+2] = lookup_table[c2];
dst[4*i+3] = lookup_table[c3];
i++;
}
return (i * 4);
}
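To make the 4/3 expansion concrete, here is a hypothetical caller (not in the patch): a 16-byte digest is consumed as five full 3-byte chunks plus one padded tail chunk, so the loop runs six times and the function returns 24 characters, i.e. 4 * DIV_ROUND_UP(len, 3).

/* Hypothetical usage sketch; dst must hold 4 * DIV_ROUND_UP(len, 3) bytes. */
static int example_encoded_digest_len(void)
{
	char digest[16] = { 0 };	/* e.g. a truncated SHA-256 digest */
	char encoded[24];		/* 4 * DIV_ROUND_UP(16, 3) == 24 */

	return ext4_fname_encode_digest(encoded, digest, sizeof(digest));
	/* returns 24 */
}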
/**
* ext4_fname_hash() -
*
* This function computes the hash of the input filename, and sets the output
* buffer to the *encoded* digest. It returns the length of the digest as its
* return value. Errors are returned as negative numbers. We trust the caller
* to allocate sufficient memory to oname string.
*/
static int ext4_fname_hash(struct ext4_fname_crypto_ctx *ctx,
const struct ext4_str *iname,
struct ext4_str *oname)
{
struct scatterlist sg;
struct hash_desc desc = {
.tfm = (struct crypto_hash *)ctx->htfm,
.flags = CRYPTO_TFM_REQ_MAY_SLEEP
};
int res = 0;
if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) {
res = ext4_fname_encode_digest(oname->name, iname->name,
iname->len);
oname->len = res;
return res;
}
sg_init_one(&sg, iname->name, iname->len);
res = crypto_hash_init(&desc);
if (res) {
printk(KERN_ERR
"%s: Error initializing crypto hash; res = [%d]\n",
__func__, res);
goto out;
}
res = crypto_hash_update(&desc, &sg, iname->len);
if (res) {
printk(KERN_ERR
"%s: Error updating crypto hash; res = [%d]\n",
__func__, res);
goto out;
}
res = crypto_hash_final(&desc,
&oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE]);
if (res) {
printk(KERN_ERR
"%s: Error finalizing crypto hash; res = [%d]\n",
__func__, res);
goto out;
}
/* Encode the digest as a printable string--this will increase the
* size of the digest */
oname->name[0] = 'I';
res = ext4_fname_encode_digest(oname->name+1,
&oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE],
EXT4_FNAME_CRYPTO_DIGEST_SIZE) + 1;
oname->len = res;
out:
return res;
}
/**
* ext4_free_fname_crypto_ctx() -
*
* Frees up a crypto context.
*/
void ext4_free_fname_crypto_ctx(struct ext4_fname_crypto_ctx *ctx)
{
if (ctx == NULL || IS_ERR(ctx))
return;
if (ctx->ctfm && !IS_ERR(ctx->ctfm))
crypto_free_ablkcipher(ctx->ctfm);
if (ctx->htfm && !IS_ERR(ctx->htfm))
crypto_free_hash(ctx->htfm);
if (ctx->workpage && !IS_ERR(ctx->workpage))
__free_page(ctx->workpage);
kfree(ctx);
}
/**
* ext4_put_fname_crypto_ctx() -
*
* Returns the crypto context to the free list. If the free list is above a
* threshold, completely frees up the context and returns the memory.
*
* TODO: Currently we directly free the crypto context. Eventually we should
* add code it to return to free list. Such an approach will increase
* efficiency of directory lookup.
*/
void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx)
{
if (*ctx == NULL || IS_ERR(*ctx))
return;
ext4_free_fname_crypto_ctx(*ctx);
*ctx = NULL;
}
/**
* ext4_search_fname_crypto_ctx() -
*/
static struct ext4_fname_crypto_ctx *ext4_search_fname_crypto_ctx(
const struct ext4_encryption_key *key)
{
return NULL;
}
/**
* ext4_alloc_fname_crypto_ctx() -
*/
struct ext4_fname_crypto_ctx *ext4_alloc_fname_crypto_ctx(
const struct ext4_encryption_key *key)
{
struct ext4_fname_crypto_ctx *ctx;
ctx = kmalloc(sizeof(struct ext4_fname_crypto_ctx), GFP_NOFS);
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
if (key->mode == EXT4_ENCRYPTION_MODE_INVALID) {
/* This will automatically set the key mode to invalid,
* as the enum value for EXT4_ENCRYPTION_MODE_INVALID is zero */
memset(&ctx->key, 0, sizeof(ctx->key));
} else {
memcpy(&ctx->key, key, sizeof(struct ext4_encryption_key));
}
ctx->has_valid_key = (EXT4_ENCRYPTION_MODE_INVALID == key->mode)
? 0 : 1;
ctx->ctfm_key_is_ready = 0;
ctx->ctfm = NULL;
ctx->htfm = NULL;
ctx->workpage = NULL;
return ctx;
}
/**
* ext4_get_fname_crypto_ctx() -
*
* Allocates a free crypto context and initializes it to hold
* the crypto material for the inode.
*
* Return: NULL if not encrypted. Error value on error. Valid pointer otherwise.
*/
struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(
struct inode *inode, u32 max_ciphertext_len)
{
struct ext4_fname_crypto_ctx *ctx;
struct ext4_inode_info *ei = EXT4_I(inode);
int res;
/* Check if the crypto policy is set on the inode */
res = ext4_encrypted_inode(inode);
if (res == 0)
return NULL;
if (!ext4_has_encryption_key(inode))
ext4_generate_encryption_key(inode);
/* Get a crypto context based on the key.
* A new context is allocated if no context matches the requested key.
*/
ctx = ext4_search_fname_crypto_ctx(&(ei->i_encryption_key));
if (ctx == NULL)
ctx = ext4_alloc_fname_crypto_ctx(&(ei->i_encryption_key));
if (IS_ERR(ctx))
return ctx;
if (ctx->has_valid_key) {
if (ctx->key.mode != EXT4_ENCRYPTION_MODE_AES_256_CTS) {
printk_once(KERN_WARNING
"ext4: unsupported key mode %d\n",
ctx->key.mode);
return ERR_PTR(-ENOKEY);
}
/* As a first cut, we will allocate new tfm in every call.
* later, we will keep the tfm around, in case the key gets
* re-used */
if (ctx->ctfm == NULL) {
ctx->ctfm = crypto_alloc_ablkcipher("cts(cbc(aes))",
0, 0);
}
if (IS_ERR(ctx->ctfm)) {
res = PTR_ERR(ctx->ctfm);
printk(
KERN_DEBUG "%s: error (%d) allocating crypto tfm\n",
__func__, res);
ctx->ctfm = NULL;
ext4_put_fname_crypto_ctx(&ctx);
return ERR_PTR(res);
}
if (ctx->ctfm == NULL) {
printk(
KERN_DEBUG "%s: could not allocate crypto tfm\n",
__func__);
ext4_put_fname_crypto_ctx(&ctx);
return ERR_PTR(-ENOMEM);
}
if (ctx->workpage == NULL)
ctx->workpage = alloc_page(GFP_NOFS);
if (IS_ERR(ctx->workpage)) {
res = PTR_ERR(ctx->workpage);
printk(
KERN_DEBUG "%s: error (%d) allocating work page\n",
__func__, res);
ctx->workpage = NULL;
ext4_put_fname_crypto_ctx(&ctx);
return ERR_PTR(res);
}
if (ctx->workpage == NULL) {
printk(
KERN_DEBUG "%s: could not allocate work page\n",
__func__);
ext4_put_fname_crypto_ctx(&ctx);
return ERR_PTR(-ENOMEM);
}
ctx->lim = max_ciphertext_len;
crypto_ablkcipher_clear_flags(ctx->ctfm, ~0);
crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctx->ctfm),
CRYPTO_TFM_REQ_WEAK_KEY);
/* If we are lucky, we will get a context that is already
* set up with the right key. Else, we will have to
* set the key */
if (!ctx->ctfm_key_is_ready) {
/* Since our crypto objectives for filename encryption
* are pretty weak,
* we directly use the inode master key */
res = crypto_ablkcipher_setkey(ctx->ctfm,
ctx->key.raw, ctx->key.size);
if (res) {
ext4_put_fname_crypto_ctx(&ctx);
return ERR_PTR(-EIO);
}
ctx->ctfm_key_is_ready = 1;
} else {
/* In the current implementation, key should never be
* marked "ready" for a context that has just been
* allocated. So we should never reach here */
BUG();
}
}
if (ctx->htfm == NULL)
ctx->htfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->htfm)) {
res = PTR_ERR(ctx->htfm);
printk(KERN_DEBUG "%s: error (%d) allocating hash tfm\n",
__func__, res);
ctx->htfm = NULL;
ext4_put_fname_crypto_ctx(&ctx);
return ERR_PTR(res);
}
if (ctx->htfm == NULL) {
printk(KERN_DEBUG "%s: could not allocate hash tfm\n",
__func__);
ext4_put_fname_crypto_ctx(&ctx);
return ERR_PTR(-ENOMEM);
}
return ctx;
}
/**
* ext4_fname_crypto_round_up() -
*
* Return: The next multiple of block size
*/
u32 ext4_fname_crypto_round_up(u32 size, u32 blksize)
{
return ((size+blksize-1)/blksize)*blksize;
}
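A quick illustration, assuming the 16-byte AES block size used for EXT4_CRYPTO_BLOCK_SIZE:

/* Hypothetical examples:
 * ext4_fname_crypto_round_up(1, 16)  == 16
 * ext4_fname_crypto_round_up(16, 16) == 16
 * ext4_fname_crypto_round_up(17, 16) == 32
 */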
/**
* ext4_fname_crypto_namelen_on_disk() -
*/
int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
u32 namelen)
{
u32 ciphertext_len;
if (ctx == NULL)
return -EIO;
if (!(ctx->has_valid_key))
return -EACCES;
ciphertext_len = (namelen < EXT4_CRYPTO_BLOCK_SIZE) ?
EXT4_CRYPTO_BLOCK_SIZE : namelen;
ciphertext_len = (ciphertext_len > ctx->lim)
? ctx->lim : ciphertext_len;
return (int) ciphertext_len;
}
/**
* ext4_fname_crypto_alloc_buffer() -
*
* Allocates an output buffer that is sufficient for the crypto operation
* specified by the context and the direction.
*/
int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx,
u32 ilen, struct ext4_str *crypto_str)
{
unsigned int olen;
if (!ctx)
return -EIO;
olen = ext4_fname_crypto_round_up(ilen, EXT4_CRYPTO_BLOCK_SIZE);
crypto_str->len = olen;
if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2)
olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2;
/* Allocated buffer can hold one more character to null-terminate the
* string */
crypto_str->name = kmalloc(olen+1, GFP_NOFS);
if (!(crypto_str->name))
return -ENOMEM;
return 0;
}
/**
* ext4_fname_crypto_free_buffer() -
*
* Frees the buffer allocated for crypto operation.
*/
void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str)
{
if (!crypto_str)
return;
kfree(crypto_str->name);
crypto_str->name = NULL;
}
/**
* ext4_fname_disk_to_usr() - converts a filename from disk space to user space
*/
int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
const struct ext4_str *iname,
struct ext4_str *oname)
{
if (ctx == NULL)
return -EIO;
if (iname->len < 3) {
/*Check for . and .. */
if (iname->name[0] == '.' && iname->name[iname->len-1] == '.') {
oname->name[0] = '.';
oname->name[iname->len-1] = '.';
oname->len = iname->len;
return oname->len;
}
}
if (ctx->has_valid_key)
return ext4_fname_decrypt(ctx, iname, oname);
else
return ext4_fname_hash(ctx, iname, oname);
}
int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
const struct ext4_dir_entry_2 *de,
struct ext4_str *oname)
{
struct ext4_str iname = {.name = (unsigned char *) de->name,
.len = de->name_len };
return _ext4_fname_disk_to_usr(ctx, &iname, oname);
}
/**
* ext4_fname_usr_to_disk() - converts a filename from user space to disk space
*/
int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx,
const struct qstr *iname,
struct ext4_str *oname)
{
int res;
if (ctx == NULL)
return -EIO;
if (iname->len < 3) {
/*Check for . and .. */
if (iname->name[0] == '.' &&
iname->name[iname->len-1] == '.') {
oname->name[0] = '.';
oname->name[iname->len-1] = '.';
oname->len = iname->len;
return oname->len;
}
}
if (ctx->has_valid_key) {
res = ext4_fname_encrypt(ctx, iname, oname);
return res;
}
/* Without a proper key, a user is not allowed to modify the filenames
* in a directory. Consequently, a user space name cannot be mapped to
* a disk-space name */
return -EACCES;
}
/*
* Calculate the htree hash from a filename from user space
*/
int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
const struct qstr *iname,
struct dx_hash_info *hinfo)
{
struct ext4_str tmp, tmp2;
int ret = 0;
if (!ctx || !ctx->has_valid_key ||
((iname->name[0] == '.') &&
((iname->len == 1) ||
((iname->name[1] == '.') && (iname->len == 2))))) {
ext4fs_dirhash(iname->name, iname->len, hinfo);
return 0;
}
/* First encrypt the plaintext name */
ret = ext4_fname_crypto_alloc_buffer(ctx, iname->len, &tmp);
if (ret < 0)
return ret;
ret = ext4_fname_encrypt(ctx, iname, &tmp);
if (ret < 0)
goto out;
tmp2.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1;
tmp2.name = kmalloc(tmp2.len + 1, GFP_KERNEL);
if (tmp2.name == NULL) {
ret = -ENOMEM;
goto out;
}
ret = ext4_fname_hash(ctx, &tmp, &tmp2);
if (ret > 0)
ext4fs_dirhash(tmp2.name, tmp2.len, hinfo);
ext4_fname_crypto_free_buffer(&tmp2);
out:
ext4_fname_crypto_free_buffer(&tmp);
return ret;
}
/**
* ext4_fname_disk_to_hash() - converts a filename from disk space to an htree hash
*/
int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx,
const struct ext4_dir_entry_2 *de,
struct dx_hash_info *hinfo)
{
struct ext4_str iname = {.name = (unsigned char *) de->name,
.len = de->name_len};
struct ext4_str tmp;
int ret;
if (!ctx ||
((iname.name[0] == '.') &&
((iname.len == 1) ||
((iname.name[1] == '.') && (iname.len == 2))))) {
ext4fs_dirhash(iname.name, iname.len, hinfo);
return 0;
}
tmp.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1;
tmp.name = kmalloc(tmp.len + 1, GFP_KERNEL);
if (tmp.name == NULL)
return -ENOMEM;
ret = ext4_fname_hash(ctx, &iname, &tmp);
if (ret > 0)
ext4fs_dirhash(tmp.name, tmp.len, hinfo);
ext4_fname_crypto_free_buffer(&tmp);
return ret;
}
/*
* linux/fs/ext4/crypto_key.c
*
* Copyright (C) 2015, Google, Inc.
*
* This contains encryption key functions for ext4
*
* Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
*/
#include <keys/encrypted-type.h>
#include <keys/user-type.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <uapi/linux/keyctl.h>
#include "ext4.h"
#include "xattr.h"
static void derive_crypt_complete(struct crypto_async_request *req, int rc)
{
struct ext4_completion_result *ecr = req->data;
if (rc == -EINPROGRESS)
return;
ecr->res = rc;
complete(&ecr->completion);
}
/**
* ext4_derive_key_aes() - Derive a key using AES-128-ECB
* @deriving_key: Encryption key used for derivation.
* @source_key: Source key to which to apply derivation.
* @derived_key: Derived key.
*
* Return: Zero on success; non-zero otherwise.
*/
static int ext4_derive_key_aes(char deriving_key[EXT4_AES_128_ECB_KEY_SIZE],
char source_key[EXT4_AES_256_XTS_KEY_SIZE],
char derived_key[EXT4_AES_256_XTS_KEY_SIZE])
{
int res = 0;
struct ablkcipher_request *req = NULL;
DECLARE_EXT4_COMPLETION_RESULT(ecr);
struct scatterlist src_sg, dst_sg;
struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("ecb(aes)", 0,
0);
if (IS_ERR(tfm)) {
res = PTR_ERR(tfm);
tfm = NULL;
goto out;
}
crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
req = ablkcipher_request_alloc(tfm, GFP_NOFS);
if (!req) {
res = -ENOMEM;
goto out;
}
ablkcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
derive_crypt_complete, &ecr);
res = crypto_ablkcipher_setkey(tfm, deriving_key,
EXT4_AES_128_ECB_KEY_SIZE);
if (res < 0)
goto out;
sg_init_one(&src_sg, source_key, EXT4_AES_256_XTS_KEY_SIZE);
sg_init_one(&dst_sg, derived_key, EXT4_AES_256_XTS_KEY_SIZE);
ablkcipher_request_set_crypt(req, &src_sg, &dst_sg,
EXT4_AES_256_XTS_KEY_SIZE, NULL);
res = crypto_ablkcipher_encrypt(req);
if (res == -EINPROGRESS || res == -EBUSY) {
BUG_ON(req->base.data != &ecr);
wait_for_completion(&ecr.completion);
res = ecr.res;
}
out:
if (req)
ablkcipher_request_free(req);
if (tfm)
crypto_free_ablkcipher(tfm);
return res;
}
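For reference, the same derivation can be reproduced outside the kernel with OpenSSL's EVP interface. This is a hypothetical sketch, assuming a 16-byte nonce (EXT4_AES_128_ECB_KEY_SIZE) and a 64-byte master key (EXT4_AES_256_XTS_KEY_SIZE), as in the function above:

/* Hypothetical userspace equivalent of ext4_derive_key_aes():
 * derived = AES-128-ECB-encrypt(key = nonce, data = master key). */
#include <openssl/evp.h>

static int derive_key_aes_userspace(const unsigned char nonce[16],
				    const unsigned char master[64],
				    unsigned char derived[64])
{
	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
	int outl = 0, ok;

	if (!ctx)
		return -1;
	ok = EVP_EncryptInit_ex(ctx, EVP_aes_128_ecb(), NULL, nonce, NULL) &&
	     EVP_CIPHER_CTX_set_padding(ctx, 0) &&	/* 64 bytes = 4 AES blocks */
	     EVP_EncryptUpdate(ctx, derived, &outl, master, 64);
	EVP_CIPHER_CTX_free(ctx);
	return (ok && outl == 64) ? 0 : -1;
}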
/**
* ext4_generate_encryption_key() - generates an encryption key
* @inode: The inode to generate the encryption key for.
*/
int ext4_generate_encryption_key(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_encryption_key *crypt_key = &ei->i_encryption_key;
char full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE +
(EXT4_KEY_DESCRIPTOR_SIZE * 2) + 1];
struct key *keyring_key = NULL;
struct ext4_encryption_key *master_key;
struct ext4_encryption_context ctx;
struct user_key_payload *ukp;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
&ctx, sizeof(ctx));
if (res != sizeof(ctx)) {
if (res > 0)
res = -EINVAL;
goto out;
}
res = 0;
if (S_ISREG(inode->i_mode))
crypt_key->mode = ctx.contents_encryption_mode;
else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
crypt_key->mode = ctx.filenames_encryption_mode;
else {
printk(KERN_ERR "ext4 crypto: Unsupported inode type.\n");
BUG();
}
crypt_key->size = ext4_encryption_key_size(crypt_key->mode);
BUG_ON(!crypt_key->size);
if (DUMMY_ENCRYPTION_ENABLED(sbi)) {
memset(crypt_key->raw, 0x42, EXT4_AES_256_XTS_KEY_SIZE);
goto out;
}
memcpy(full_key_descriptor, EXT4_KEY_DESC_PREFIX,
EXT4_KEY_DESC_PREFIX_SIZE);
sprintf(full_key_descriptor + EXT4_KEY_DESC_PREFIX_SIZE,
"%*phN", EXT4_KEY_DESCRIPTOR_SIZE,
ctx.master_key_descriptor);
full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE +
(2 * EXT4_KEY_DESCRIPTOR_SIZE)] = '\0';
keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
if (IS_ERR(keyring_key)) {
res = PTR_ERR(keyring_key);
keyring_key = NULL;
goto out;
}
BUG_ON(keyring_key->type != &key_type_logon);
ukp = ((struct user_key_payload *)keyring_key->payload.data);
if (ukp->datalen != sizeof(struct ext4_encryption_key)) {
res = -EINVAL;
goto out;
}
master_key = (struct ext4_encryption_key *)ukp->data;
BUILD_BUG_ON(EXT4_AES_128_ECB_KEY_SIZE !=
EXT4_KEY_DERIVATION_NONCE_SIZE);
BUG_ON(master_key->size != EXT4_AES_256_XTS_KEY_SIZE);
res = ext4_derive_key_aes(ctx.nonce, master_key->raw, crypt_key->raw);
out:
if (keyring_key)
key_put(keyring_key);
if (res < 0)
crypt_key->mode = EXT4_ENCRYPTION_MODE_INVALID;
return res;
}
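A hypothetical userspace counterpart (not in this commit): installing the master key in the session keyring so that the request_key() call above can find it. This sketch assumes EXT4_KEY_DESC_PREFIX is "ext4:", a 64-byte EXT4_MAX_KEY_SIZE, and a payload laid out like struct ext4_encryption_key; link with -lkeyutils.

#include <keyutils.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Userspace mirror of the kernel's key payload (an assumption here). */
struct ext4_encryption_key {
	uint32_t mode;
	char raw[64];		/* EXT4_MAX_KEY_SIZE */
	uint32_t size;
} __attribute__((__packed__));

static key_serial_t add_ext4_master_key(const char *hex_descriptor,
					const unsigned char *key, size_t len)
{
	struct ext4_encryption_key payload;
	char desc[32];

	memset(&payload, 0, sizeof(payload));
	payload.mode = 1;		/* EXT4_ENCRYPTION_MODE_AES_256_XTS */
	payload.size = (uint32_t)len;	/* must be 64 for AES-256-XTS */
	memcpy(payload.raw, key, len);
	snprintf(desc, sizeof(desc), "ext4:%s", hex_descriptor);
	return add_key("logon", desc, &payload, sizeof(payload),
		       KEY_SPEC_SESSION_KEYRING);
}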
int ext4_has_encryption_key(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_encryption_key *crypt_key = &ei->i_encryption_key;
return (crypt_key->mode != EXT4_ENCRYPTION_MODE_INVALID);
}
/*
* linux/fs/ext4/crypto_policy.c
*
* Copyright (C) 2015, Google, Inc.
*
* This contains encryption policy functions for ext4
*
* Written by Michael Halcrow, 2015.
*/
#include <linux/random.h>
#include <linux/string.h>
#include <linux/types.h>
#include "ext4.h"
#include "xattr.h"
static int ext4_inode_has_encryption_context(struct inode *inode)
{
int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, NULL, 0);
return (res > 0);
}
/*
* check whether the policy is consistent with the encryption context
* for the inode
*/
static int ext4_is_encryption_context_consistent_with_policy(
struct inode *inode, const struct ext4_encryption_policy *policy)
{
struct ext4_encryption_context ctx;
int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
sizeof(ctx));
if (res != sizeof(ctx))
return 0;
return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor,
EXT4_KEY_DESCRIPTOR_SIZE) == 0 &&
(ctx.contents_encryption_mode ==
policy->contents_encryption_mode) &&
(ctx.filenames_encryption_mode ==
policy->filenames_encryption_mode));
}
static int ext4_create_encryption_context_from_policy(
struct inode *inode, const struct ext4_encryption_policy *policy)
{
struct ext4_encryption_context ctx;
int res = 0;
ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1;
memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
EXT4_KEY_DESCRIPTOR_SIZE);
if (!ext4_valid_contents_enc_mode(policy->contents_encryption_mode)) {
printk(KERN_WARNING
"%s: Invalid contents encryption mode %d\n", __func__,
policy->contents_encryption_mode);
res = -EINVAL;
goto out;
}
if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) {
printk(KERN_WARNING
"%s: Invalid filenames encryption mode %d\n", __func__,
policy->filenames_encryption_mode);
res = -EINVAL;
goto out;
}
ctx.contents_encryption_mode = policy->contents_encryption_mode;
ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE);
get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
sizeof(ctx), 0);
out:
if (!res)
ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
return res;
}
int ext4_process_policy(const struct ext4_encryption_policy *policy,
struct inode *inode)
{
if (policy->version != 0)
return -EINVAL;
if (!ext4_inode_has_encryption_context(inode)) {
if (!ext4_empty_dir(inode))
return -ENOTEMPTY;
return ext4_create_encryption_context_from_policy(inode,
policy);
}
if (ext4_is_encryption_context_consistent_with_policy(inode, policy))
return 0;
printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n",
__func__);
return -EINVAL;
}
int ext4_get_policy(struct inode *inode, struct ext4_encryption_policy *policy)
{
struct ext4_encryption_context ctx;
int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
&ctx, sizeof(ctx));
if (res != sizeof(ctx))
return -ENOENT;
if (ctx.format != EXT4_ENCRYPTION_CONTEXT_FORMAT_V1)
return -EINVAL;
policy->version = 0;
policy->contents_encryption_mode = ctx.contents_encryption_mode;
policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
EXT4_KEY_DESCRIPTOR_SIZE);
return 0;
}
int ext4_is_child_context_consistent_with_parent(struct inode *parent,
struct inode *child)
{
struct ext4_encryption_context parent_ctx, child_ctx;
int res;
if ((parent == NULL) || (child == NULL)) {
pr_err("parent %p child %p\n", parent, child);
BUG_ON(1);
}
/* no restrictions if the parent directory is not encrypted */
if (!ext4_encrypted_inode(parent))
return 1;
res = ext4_xattr_get(parent, EXT4_XATTR_INDEX_ENCRYPTION,
EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
&parent_ctx, sizeof(parent_ctx));
if (res != sizeof(parent_ctx))
return 0;
/* if the child directory is not encrypted, this is always a problem */
if (!ext4_encrypted_inode(child))
return 0;
res = ext4_xattr_get(child, EXT4_XATTR_INDEX_ENCRYPTION,
EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
&child_ctx, sizeof(child_ctx));
if (res != sizeof(child_ctx))
return 0;
return (memcmp(parent_ctx.master_key_descriptor,
child_ctx.master_key_descriptor,
EXT4_KEY_DESCRIPTOR_SIZE) == 0 &&
(parent_ctx.contents_encryption_mode ==
child_ctx.contents_encryption_mode) &&
(parent_ctx.filenames_encryption_mode ==
child_ctx.filenames_encryption_mode));
}
/**
* ext4_inherit_context() - Sets a child context from its parent
* @parent: Parent inode from which the context is inherited.
* @child: Child inode that inherits the context from @parent.
*
* Return: Zero on success, non-zero otherwise
*/
int ext4_inherit_context(struct inode *parent, struct inode *child)
{
struct ext4_encryption_context ctx;
int res = ext4_xattr_get(parent, EXT4_XATTR_INDEX_ENCRYPTION,
EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
&ctx, sizeof(ctx));
if (res != sizeof(ctx)) {
if (DUMMY_ENCRYPTION_ENABLED(EXT4_SB(parent->i_sb))) {
ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1;
ctx.contents_encryption_mode =
EXT4_ENCRYPTION_MODE_AES_256_XTS;
ctx.filenames_encryption_mode =
EXT4_ENCRYPTION_MODE_AES_256_CTS;
memset(ctx.master_key_descriptor, 0x42,
EXT4_KEY_DESCRIPTOR_SIZE);
res = 0;
} else {
goto out;
}
}
get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
res = ext4_xattr_set(child, EXT4_XATTR_INDEX_ENCRYPTION,
EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
sizeof(ctx), 0);
out:
if (!res)
ext4_set_inode_flag(child, EXT4_INODE_ENCRYPT);
return res;
}
@@ -22,10 +22,8 @@
*/
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ext4.h"
#include "xattr.h"
@@ -110,7 +108,10 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
int err;
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct buffer_head *bh = NULL;
int dir_has_error = 0;
struct ext4_fname_crypto_ctx *enc_ctx = NULL;
struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
if (is_dx_dir(inode)) {
err = ext4_dx_readdir(file, ctx);
@@ -127,17 +128,28 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
if (ext4_has_inline_data(inode)) {
int has_inline_data = 1;
int ret = ext4_read_inline_dir(file, ctx,
err = ext4_read_inline_dir(file, ctx,
&has_inline_data);
if (has_inline_data)
return ret;
return err;
}
enc_ctx = ext4_get_fname_crypto_ctx(inode, EXT4_NAME_LEN);
if (IS_ERR(enc_ctx))
return PTR_ERR(enc_ctx);
if (enc_ctx) {
err = ext4_fname_crypto_alloc_buffer(enc_ctx, EXT4_NAME_LEN,
&fname_crypto_str);
if (err < 0) {
ext4_put_fname_crypto_ctx(&enc_ctx);
return err;
}
}
offset = ctx->pos & (sb->s_blocksize - 1);
while (ctx->pos < inode->i_size) {
struct ext4_map_blocks map;
struct buffer_head *bh = NULL;
map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb);
map.m_len = 1;
@@ -180,6 +192,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
(unsigned long long)ctx->pos);
ctx->pos += sb->s_blocksize - offset;
brelse(bh);
bh = NULL;
continue;
}
set_buffer_verified(bh);
@@ -226,25 +239,44 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
offset += ext4_rec_len_from_disk(de->rec_len,
sb->s_blocksize);
if (le32_to_cpu(de->inode)) {
if (!dir_emit(ctx, de->name,
de->name_len,
le32_to_cpu(de->inode),
get_dtype(sb, de->file_type))) {
brelse(bh);
return 0;
if (enc_ctx == NULL) {
/* Directory is not encrypted */
if (!dir_emit(ctx, de->name,
de->name_len,
le32_to_cpu(de->inode),
get_dtype(sb, de->file_type)))
goto done;
} else {
/* Directory is encrypted */
err = ext4_fname_disk_to_usr(enc_ctx,
de, &fname_crypto_str);
if (err < 0)
goto errout;
if (!dir_emit(ctx,
fname_crypto_str.name, err,
le32_to_cpu(de->inode),
get_dtype(sb, de->file_type)))
goto done;
}
}
ctx->pos += ext4_rec_len_from_disk(de->rec_len,
sb->s_blocksize);
}
offset = 0;
if ((ctx->pos < inode->i_size) && !dir_relax(inode))
goto done;
brelse(bh);
if (ctx->pos < inode->i_size) {
if (!dir_relax(inode))
return 0;
}
bh = NULL;
offset = 0;
}
return 0;
done:
err = 0;
errout:
#ifdef CONFIG_EXT4_FS_ENCRYPTION
ext4_put_fname_crypto_ctx(&enc_ctx);
ext4_fname_crypto_free_buffer(&fname_crypto_str);
#endif
brelse(bh);
return err;
}
static inline int is_32bit_api(void)
@@ -384,10 +416,15 @@ void ext4_htree_free_dir_info(struct dir_private_info *p)
/*
* Given a directory entry, enter it into the fname rb tree.
*
* When filename encryption is enabled, the dirent will hold the
encrypted filename, while the htree will hold the decrypted filename.
The decrypted filename is passed in via the ent_name parameter.
*/
int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
__u32 minor_hash,
struct ext4_dir_entry_2 *dirent)
struct ext4_dir_entry_2 *dirent,
struct ext4_str *ent_name)
{
struct rb_node **p, *parent = NULL;
struct fname *fname, *new_fn;
@@ -398,17 +435,17 @@ int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
p = &info->root.rb_node;
/* Create and allocate the fname structure */
len = sizeof(struct fname) + dirent->name_len + 1;
len = sizeof(struct fname) + ent_name->len + 1;
new_fn = kzalloc(len, GFP_KERNEL);
if (!new_fn)
return -ENOMEM;
new_fn->hash = hash;
new_fn->minor_hash = minor_hash;
new_fn->inode = le32_to_cpu(dirent->inode);
new_fn->name_len = dirent->name_len;
new_fn->name_len = ent_name->len;
new_fn->file_type = dirent->file_type;
memcpy(new_fn->name, dirent->name, dirent->name_len);
new_fn->name[dirent->name_len] = 0;
memcpy(new_fn->name, ent_name->name, ent_name->len);
new_fn->name[ent_name->len] = 0;
while (*p) {
parent = *p;
......
@@ -422,7 +422,7 @@ enum {
EXT4_INODE_DIRTY = 8,
EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */
EXT4_INODE_NOCOMPR = 10, /* Don't compress */
EXT4_INODE_ENCRYPT = 11, /* Compression error */
EXT4_INODE_ENCRYPT = 11, /* Encrypted file */
/* End compression flags --- maybe not all used */
EXT4_INODE_INDEX = 12, /* hash-indexed directory */
EXT4_INODE_IMAGIC = 13, /* AFS directory */
@@ -582,6 +582,15 @@ enum {
#define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010
#define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020
/* Encryption algorithms */
#define EXT4_ENCRYPTION_MODE_INVALID 0
#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1
#define EXT4_ENCRYPTION_MODE_AES_256_GCM 2
#define EXT4_ENCRYPTION_MODE_AES_256_CBC 3
#define EXT4_ENCRYPTION_MODE_AES_256_CTS 4
#include "ext4_crypto.h"
/*
* ioctl commands
*/
@@ -603,6 +612,9 @@ enum {
#define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64)
#define EXT4_IOC_SWAP_BOOT _IO('f', 17)
#define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18)
#define EXT4_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct ext4_encryption_policy)
#define EXT4_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
#define EXT4_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct ext4_encryption_policy)
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
@@ -939,6 +951,11 @@ struct ext4_inode_info {
/* Precomputed uuid+inum+igen checksum for seeding inode checksums */
__u32 i_csum_seed;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
/* Encryption params */
struct ext4_encryption_key i_encryption_key;
#endif
};
/*
@@ -1142,7 +1159,8 @@ struct ext4_super_block {
__le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/
__u8 s_log_groups_per_flex; /* FLEX_BG group size */
__u8 s_checksum_type; /* metadata checksum algorithm used */
__le16 s_reserved_pad;
__u8 s_encryption_level; /* versioning level for encryption */
__u8 s_reserved_pad; /* Padding to next 32bits */
__le64 s_kbytes_written; /* nr of lifetime kilobytes written */
__le32 s_snapshot_inum; /* Inode number of active snapshot */
__le32 s_snapshot_id; /* sequential ID of active snapshot */
@@ -1169,7 +1187,9 @@ struct ext4_super_block {
__le32 s_overhead_clusters; /* overhead blocks/clusters in fs */
__le32 s_backup_bgs[2]; /* groups with sparse_super2 SBs */
__u8 s_encrypt_algos[4]; /* Encryption algorithms in use */
__le32 s_reserved[105]; /* Padding to the end of the block */
__u8 s_encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
__le32 s_lpf_ino; /* Location of the lost+found inode */
__le32 s_reserved[100]; /* Padding to the end of the block */
__le32 s_checksum; /* crc32c(superblock) */
};
@@ -1180,8 +1200,16 @@ struct ext4_super_block {
/*
* run-time mount flags
*/
#define EXT4_MF_MNTDIR_SAMPLED 0x0001
#define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */
#define EXT4_MF_MNTDIR_SAMPLED 0x0001
#define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */
#define EXT4_MF_TEST_DUMMY_ENCRYPTION 0x0004
#ifdef CONFIG_EXT4_FS_ENCRYPTION
#define DUMMY_ENCRYPTION_ENABLED(sbi) (unlikely((sbi)->s_mount_flags & \
EXT4_MF_TEST_DUMMY_ENCRYPTION))
#else
#define DUMMY_ENCRYPTION_ENABLED(sbi) (0)
#endif
/* Number of quota types we support */
#define EXT4_MAXQUOTAS 2
@@ -1351,6 +1379,12 @@ struct ext4_sb_info {
struct ratelimit_state s_err_ratelimit_state;
struct ratelimit_state s_warning_ratelimit_state;
struct ratelimit_state s_msg_ratelimit_state;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
/* Encryption */
uint32_t s_file_encryption_mode;
uint32_t s_dir_encryption_mode;
#endif
};
static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1466,6 +1500,18 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
#define EXT4_SB(sb) (sb)
#endif
/*
* Returns true if the inode is encrypted
*/
static inline int ext4_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT);
#else
return 0;
#endif
}
#define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
/*
@@ -1575,8 +1621,9 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
EXT4_FEATURE_INCOMPAT_EXTENTS| \
EXT4_FEATURE_INCOMPAT_64BIT| \
EXT4_FEATURE_INCOMPAT_FLEX_BG| \
EXT4_FEATURE_INCOMPAT_MMP | \
EXT4_FEATURE_INCOMPAT_INLINE_DATA)
EXT4_FEATURE_INCOMPAT_MMP | \
EXT4_FEATURE_INCOMPAT_INLINE_DATA | \
EXT4_FEATURE_INCOMPAT_ENCRYPT)
#define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
@@ -2001,6 +2048,99 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
struct ext4_group_desc *gdp);
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
/* crypto_policy.c */
int ext4_is_child_context_consistent_with_parent(struct inode *parent,
struct inode *child);
int ext4_inherit_context(struct inode *parent, struct inode *child);
void ext4_to_hex(char *dst, char *src, size_t src_size);
int ext4_process_policy(const struct ext4_encryption_policy *policy,
struct inode *inode);
int ext4_get_policy(struct inode *inode,
struct ext4_encryption_policy *policy);
/* crypto.c */
bool ext4_valid_contents_enc_mode(uint32_t mode);
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size);
extern struct workqueue_struct *ext4_read_workqueue;
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode);
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx);
void ext4_restore_control_page(struct page *data_page);
struct page *ext4_encrypt(struct inode *inode,
struct page *plaintext_page);
int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page);
int ext4_decrypt_one(struct inode *inode, struct page *page);
int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
int ext4_init_crypto(void);
void ext4_exit_crypto(void);
static inline int ext4_sb_has_crypto(struct super_block *sb)
{
return EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT);
}
#else
static inline int ext4_init_crypto(void) { return 0; }
static inline void ext4_exit_crypto(void) { }
static inline int ext4_sb_has_crypto(struct super_block *sb)
{
return 0;
}
#endif
/* crypto_fname.c */
bool ext4_valid_filenames_enc_mode(uint32_t mode);
u32 ext4_fname_crypto_round_up(u32 size, u32 blksize);
int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx,
u32 ilen, struct ext4_str *crypto_str);
int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
const struct ext4_str *iname,
struct ext4_str *oname);
int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
const struct ext4_dir_entry_2 *de,
struct ext4_str *oname);
int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx,
const struct qstr *iname,
struct ext4_str *oname);
int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
const struct qstr *iname,
struct dx_hash_info *hinfo);
int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx,
const struct ext4_dir_entry_2 *de,
struct dx_hash_info *hinfo);
int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
u32 namelen);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx);
struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(struct inode *inode,
u32 max_len);
void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str);
#else
static inline
void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx) { }
static inline
struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(struct inode *inode,
u32 max_len)
{
return NULL;
}
static inline void ext4_fname_crypto_free_buffer(struct ext4_str *p) { }
#endif
/* crypto_key.c */
int ext4_generate_encryption_key(struct inode *inode);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
int ext4_has_encryption_key(struct inode *inode);
#else
static inline int ext4_has_encryption_key(struct inode *inode)
{
return 0;
}
#endif
/* dir.c */
extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
struct file *,
......@@ -2011,17 +2151,20 @@ extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \
(de), (bh), (buf), (size), (offset)))
extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
__u32 minor_hash,
struct ext4_dir_entry_2 *dirent);
__u32 minor_hash,
struct ext4_dir_entry_2 *dirent,
struct ext4_str *ent_name);
extern void ext4_htree_free_dir_info(struct dir_private_info *p);
extern int ext4_find_dest_de(struct inode *dir, struct inode *inode,
struct buffer_head *bh,
void *buf, int buf_size,
const char *name, int namelen,
struct ext4_dir_entry_2 **dest_de);
void ext4_insert_dentry(struct inode *inode,
int ext4_insert_dentry(struct inode *dir,
struct inode *inode,
struct ext4_dir_entry_2 *de,
int buf_size,
const struct qstr *iname,
const char *name, int namelen);
static inline void ext4_update_dx_flag(struct inode *inode)
{
......@@ -2099,6 +2242,7 @@ extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
/* inode.c */
int ext4_inode_is_fast_symlink(struct inode *inode);
struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
int ext4_get_block_write(struct inode *inode, sector_t iblock,
......@@ -2189,6 +2333,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
void *entry_buf,
int buf_size,
int csum_size);
extern int ext4_empty_dir(struct inode *inode);
/* resize.c */
extern int ext4_group_add(struct super_block *sb,
......@@ -2698,6 +2843,10 @@ static inline void ext4_set_de_type(struct super_block *sb,
de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
}
/* readpages.c */
extern int ext4_mpage_readpages(struct address_space *mapping,
struct list_head *pages, struct page *page,
unsigned nr_pages);
/* symlink.c */
extern const struct inode_operations ext4_symlink_inode_operations;
......
/*
* linux/fs/ext4/ext4_crypto.h
*
* Copyright (C) 2015, Google, Inc.
*
* This contains encryption header content for ext4
*
* Written by Michael Halcrow, 2015.
*/
#ifndef _EXT4_CRYPTO_H
#define _EXT4_CRYPTO_H
#include <linux/fs.h>
#define EXT4_KEY_DESCRIPTOR_SIZE 8
/* Policy provided via an ioctl on the topmost directory */
struct ext4_encryption_policy {
char version;
char contents_encryption_mode;
char filenames_encryption_mode;
char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
} __attribute__((__packed__));
#define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1
#define EXT4_KEY_DERIVATION_NONCE_SIZE 16
/**
* Encryption context for inode
*
* Protector format:
* 1 byte: Protector format (1 = this version)
* 1 byte: File contents encryption mode
* 1 byte: File names encryption mode
* 1 byte: Reserved
* 8 bytes: Master Key descriptor
* 16 bytes: Encryption Key derivation nonce
*/
struct ext4_encryption_context {
char format;
char contents_encryption_mode;
char filenames_encryption_mode;
char reserved;
char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE];
} __attribute__((__packed__));
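Since this packed structure is written to disk verbatim as an inode xattr, its size is fixed at 1 + 1 + 1 + 1 + 8 + 16 = 28 bytes. A minimal compile-time sanity check one could add locally (a sketch, not part of this series; BUILD_BUG_ON() needs <linux/bug.h> and a function context):
static inline void ext4_crypto_context_layout_check(void)
{
	/* Hypothetical helper: break the build if the on-disk
	 * context ever deviates from its 28-byte layout. */
	BUILD_BUG_ON(sizeof(struct ext4_encryption_context) !=
		     4 + EXT4_KEY_DESCRIPTOR_SIZE +
		     EXT4_KEY_DERIVATION_NONCE_SIZE);
}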
/* Encryption parameters */
#define EXT4_XTS_TWEAK_SIZE 16
#define EXT4_AES_128_ECB_KEY_SIZE 16
#define EXT4_AES_256_GCM_KEY_SIZE 32
#define EXT4_AES_256_CBC_KEY_SIZE 32
#define EXT4_AES_256_CTS_KEY_SIZE 32
#define EXT4_AES_256_XTS_KEY_SIZE 64
#define EXT4_MAX_KEY_SIZE 64
#define EXT4_KEY_DESC_PREFIX "ext4:"
#define EXT4_KEY_DESC_PREFIX_SIZE 5
struct ext4_encryption_key {
uint32_t mode;
char raw[EXT4_MAX_KEY_SIZE];
uint32_t size;
};
#define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
#define EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL 0x00000002
struct ext4_crypto_ctx {
struct crypto_tfm *tfm; /* Crypto API context */
struct page *bounce_page; /* Ciphertext page on write path */
struct page *control_page; /* Original page on write path */
struct bio *bio; /* The bio for this context */
struct work_struct work; /* Work queue for read complete path */
struct list_head free_list; /* Free list */
int flags; /* Flags */
int mode; /* Encryption mode for tfm */
};
struct ext4_completion_result {
struct completion completion;
int res;
};
#define DECLARE_EXT4_COMPLETION_RESULT(ecr) \
struct ext4_completion_result ecr = { \
COMPLETION_INITIALIZER((ecr).completion), 0 }
static inline int ext4_encryption_key_size(int mode)
{
switch (mode) {
case EXT4_ENCRYPTION_MODE_AES_256_XTS:
return EXT4_AES_256_XTS_KEY_SIZE;
case EXT4_ENCRYPTION_MODE_AES_256_GCM:
return EXT4_AES_256_GCM_KEY_SIZE;
case EXT4_ENCRYPTION_MODE_AES_256_CBC:
return EXT4_AES_256_CBC_KEY_SIZE;
case EXT4_ENCRYPTION_MODE_AES_256_CTS:
return EXT4_AES_256_CTS_KEY_SIZE;
default:
BUG();
}
return 0;
}
#define EXT4_FNAME_NUM_SCATTER_ENTRIES 4
#define EXT4_CRYPTO_BLOCK_SIZE 16
#define EXT4_FNAME_CRYPTO_DIGEST_SIZE 32
struct ext4_str {
unsigned char *name;
u32 len;
};
struct ext4_fname_crypto_ctx {
u32 lim;
char tmp_buf[EXT4_CRYPTO_BLOCK_SIZE];
struct crypto_ablkcipher *ctfm;
struct crypto_hash *htfm;
struct page *workpage;
struct ext4_encryption_key key;
unsigned has_valid_key : 1;
unsigned ctfm_key_is_ready : 1;
};
/**
* For encrypted symlinks, the ciphertext length is stored at the beginning
* of the string in little-endian format.
*/
struct ext4_encrypted_symlink_data {
__le16 len;
char encrypted_path[1];
} __attribute__((__packed__));
/**
* This function is used to calculate the disk space required to
* store a filename of length l in encrypted symlink format.
*/
static inline u32 encrypted_symlink_data_len(u32 l)
{
if (l < EXT4_CRYPTO_BLOCK_SIZE)
l = EXT4_CRYPTO_BLOCK_SIZE;
return (l + sizeof(struct ext4_encrypted_symlink_data) - 1);
}
#endif /* _EXT4_CRYPTO_H */
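For concreteness: the ciphertext is never shorter than one EXT4_CRYPTO_BLOCK_SIZE (16-byte) block, and the packed header struct is 3 bytes (a __le16 length plus the first path byte, which overlaps the ciphertext), so:
	encrypted_symlink_data_len(10) = 16 + 3 - 1 = 18
	encrypted_symlink_data_len(20) = 20 + 3 - 1 = 22
In other words, the on-disk footprint is the 2-byte length header plus the block-padded ciphertext.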
......@@ -1717,12 +1717,6 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
{
unsigned short ext1_ee_len, ext2_ee_len;
/*
* Make sure that both extents are initialized. We don't merge
* unwritten extents so that we can be sure that end_io code has
* the extent that was written properly split out and conversion to
* initialized is trivial.
*/
if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
return 0;
......@@ -3128,6 +3122,9 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
ee_len = ext4_ext_get_actual_len(ex);
ee_pblock = ext4_ext_pblock(ex);
if (ext4_encrypted_inode(inode))
return ext4_encrypted_zeroout(inode, ex);
ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
if (ret > 0)
ret = 0;
......@@ -4535,19 +4532,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
*/
reserved_clusters = get_reserved_cluster_alloc(inode,
map->m_lblk, allocated);
if (map_from_cluster) {
if (reserved_clusters) {
/*
* We have clusters reserved for this range.
* But since we are not doing actual allocation
* and are simply using blocks from previously
* allocated cluster, we should release the
* reservation and not claim quota.
*/
ext4_da_update_reserve_space(inode,
reserved_clusters, 0);
}
} else {
if (!map_from_cluster) {
BUG_ON(allocated_clusters < reserved_clusters);
if (reserved_clusters < allocated_clusters) {
struct ext4_inode_info *ei = EXT4_I(inode);
......@@ -4803,12 +4788,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
else
max_blocks -= lblk;
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT |
EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
EXT4_EX_NOCACHE;
if (mode & FALLOC_FL_KEEP_SIZE)
flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
mutex_lock(&inode->i_mutex);
/*
......@@ -4825,15 +4804,28 @@ static long ext4_zero_range(struct file *file, loff_t offset,
ret = inode_newsize_ok(inode, new_size);
if (ret)
goto out_mutex;
/*
* If we have a partial block after EOF we have to allocate
* the entire block.
*/
if (partial_end)
max_blocks += 1;
}
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
if (mode & FALLOC_FL_KEEP_SIZE)
flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
/* Preallocate the range including the unaligned edges */
if (partial_begin || partial_end) {
ret = ext4_alloc_file_blocks(file,
round_down(offset, 1 << blkbits) >> blkbits,
(round_up((offset + len), 1 << blkbits) -
round_down(offset, 1 << blkbits)) >> blkbits,
new_size, flags, mode);
if (ret)
goto out_mutex;
}
/* Zero range excluding the unaligned edges */
if (max_blocks > 0) {
flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
EXT4_EX_NOCACHE);
/* Now release the pages and zero the block-aligned part of the pages */
truncate_pagecache_range(inode, start, end - 1);
......@@ -4847,19 +4839,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
flags, mode);
if (ret)
goto out_dio;
/*
* Remove entire range from the extent status tree.
*
* ext4_es_remove_extent(inode, lblk, max_blocks) is
* NOT sufficient. I'm not sure why this is the case,
* but let's be conservative and remove the extent
* status tree for the entire inode. There should be
* no outstanding delalloc extents thanks to the
* filemap_write_and_wait_range() call above.
*/
ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
if (ret)
goto out_dio;
}
if (!partial_begin && !partial_end)
goto out_dio;
......@@ -4922,6 +4901,20 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
ext4_lblk_t lblk;
unsigned int blkbits = inode->i_blkbits;
/*
* Encrypted inodes can't handle collapse range or insert
* range since we would need to re-encrypt blocks with a
* different IV or XTS tweak (which are based on the logical
* block number).
*
* XXX It's not clear why zero range isn't working, but we'll
* leave it disabled for encrypted inodes for now. This is a
* bug we should fix....
*/
if (ext4_encrypted_inode(inode) &&
(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE)))
return -EOPNOTSUPP;
/* Return error if mode is not supported */
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
......
......@@ -9,12 +9,10 @@
*
* Ext4 extents status tree core functions.
*/
#include <linux/rbtree.h>
#include <linux/list_sort.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "ext4.h"
#include "extents_status.h"
#include <trace/events/ext4.h>
......
......@@ -20,7 +20,6 @@
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/quotaops.h>
......@@ -221,6 +220,13 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file->f_mapping->host;
if (ext4_encrypted_inode(inode)) {
int err = ext4_generate_encryption_key(inode);
if (err)
return 0;
}
file_accessed(file);
if (IS_DAX(file_inode(file))) {
vma->vm_ops = &ext4_dax_vm_ops;
......@@ -238,6 +244,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
struct vfsmount *mnt = filp->f_path.mnt;
struct path path;
char buf[64], *cp;
int ret;
if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
!(sb->s_flags & MS_RDONLY))) {
......@@ -276,11 +283,17 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
* writing and the journal is present
*/
if (filp->f_mode & FMODE_WRITE) {
int ret = ext4_inode_attach_jinode(inode);
ret = ext4_inode_attach_jinode(inode);
if (ret < 0)
return ret;
}
return dquot_file_open(inode, filp);
ret = dquot_file_open(inode, filp);
if (!ret && ext4_encrypted_inode(inode)) {
ret = ext4_generate_encryption_key(inode);
if (ret)
ret = -EACCES;
}
return ret;
}
/*
......
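For context on the open/mmap hooks above: ext4_generate_encryption_key() (crypto_key.c) looks the master key up in the process keyrings under the EXT4_KEY_DESC_PREFIX ("ext4:") descriptor. A hedged userspace sketch of provisioning such a key via keyutils follows; the "logon" key type and the payload layout mirroring struct ext4_encryption_key are assumptions read off this series, not a stable ABI:
/* Sketch: load an ext4 crypto master key into the session keyring. */
#include <keyutils.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ext4_encryption_key {		/* assumed payload layout */
	uint32_t mode;
	char raw[64];			/* EXT4_MAX_KEY_SIZE */
	uint32_t size;
};

int load_master_key(const char *desc_hex, const uint8_t *key, uint32_t keylen)
{
	struct ext4_encryption_key payload;
	char description[32];

	if (keylen > sizeof(payload.raw))
		return -1;
	memset(&payload, 0, sizeof(payload));
	payload.mode = 1;		/* assumed: AES_256_XTS */
	payload.size = keylen;		/* 64 bytes for AES-256-XTS */
	memcpy(payload.raw, key, keylen);

	/* "ext4:" + the 8-byte descriptor rendered as 16 hex chars */
	snprintf(description, sizeof(description), "ext4:%s", desc_hex);

	if (add_key("logon", description, &payload, sizeof(payload),
		    KEY_SPEC_SESSION_KEYRING) < 0) {
		perror("add_key");
		return -1;
	}
	return 0;
}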
......@@ -26,7 +26,6 @@
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/jbd2.h>
#include <linux/blkdev.h>
#include "ext4.h"
......
......@@ -10,7 +10,6 @@
*/
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/cryptohash.h>
#include "ext4.h"
......
......@@ -14,7 +14,6 @@
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
......@@ -997,6 +996,12 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
ei->i_block_group = group;
ei->i_last_alloc_group = ~0;
/* If the directory is encrypted, then we should encrypt the inode. */
if ((S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) &&
(ext4_encrypted_inode(dir) ||
DUMMY_ENCRYPTION_ENABLED(sbi)))
ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
ext4_set_inode_flags(inode);
if (IS_DIRSYNC(inode))
ext4_handle_sync(handle);
......@@ -1029,11 +1034,28 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
ext4_set_inode_state(inode, EXT4_STATE_NEW);
ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
if ((sbi->s_file_encryption_mode == EXT4_ENCRYPTION_MODE_INVALID) &&
(sbi->s_dir_encryption_mode == EXT4_ENCRYPTION_MODE_INVALID)) {
ei->i_inline_off = 0;
if (EXT4_HAS_INCOMPAT_FEATURE(sb,
EXT4_FEATURE_INCOMPAT_INLINE_DATA))
ext4_set_inode_state(inode,
EXT4_STATE_MAY_INLINE_DATA);
} else {
/* Inline data and encryption are incompatible.
* We turn off inline data since encryption is enabled. */
ei->i_inline_off = 1;
if (EXT4_HAS_INCOMPAT_FEATURE(sb,
EXT4_FEATURE_INCOMPAT_INLINE_DATA))
ext4_clear_inode_state(inode,
EXT4_STATE_MAY_INLINE_DATA);
}
#else
ei->i_inline_off = 0;
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_INLINE_DATA))
ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
#endif
ret = inode;
err = dquot_alloc_inode(inode);
if (err)
......
......@@ -11,11 +11,13 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
#include "truncate.h"
#include <linux/fiemap.h>
#define EXT4_XATTR_SYSTEM_DATA "data"
#define EXT4_MIN_INLINE_DATA_SIZE ((sizeof(__le32) * EXT4_N_BLOCKS))
......@@ -972,7 +974,7 @@ void ext4_show_inline_dir(struct inode *dir, struct buffer_head *bh,
offset = 0;
while ((void *)de < dlimit) {
de_len = ext4_rec_len_from_disk(de->rec_len, inline_size);
trace_printk("de: off %u rlen %u name %*.s nlen %u ino %u\n",
trace_printk("de: off %u rlen %u name %.*s nlen %u ino %u\n",
offset, de_len, de->name_len, de->name,
de->name_len, le32_to_cpu(de->inode));
if (ext4_check_dir_entry(dir, NULL, de, bh,
......@@ -1014,7 +1016,8 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
err = ext4_journal_get_write_access(handle, iloc->bh);
if (err)
return err;
ext4_insert_dentry(inode, de, inline_size, name, namelen);
ext4_insert_dentry(dir, inode, de, inline_size, &dentry->d_name,
name, namelen);
ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
......@@ -1327,6 +1330,7 @@ int htree_inlinedir_to_tree(struct file *dir_file,
struct ext4_iloc iloc;
void *dir_buf = NULL;
struct ext4_dir_entry_2 fake;
struct ext4_str tmp_str;
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
......@@ -1398,8 +1402,10 @@ int htree_inlinedir_to_tree(struct file *dir_file,
continue;
if (de->inode == 0)
continue;
err = ext4_htree_store_dirent(dir_file,
hinfo->hash, hinfo->minor_hash, de);
tmp_str.name = de->name;
tmp_str.len = de->name_len;
err = ext4_htree_store_dirent(dir_file, hinfo->hash,
hinfo->minor_hash, de, &tmp_str);
if (err) {
count = err;
goto out;
......
......@@ -20,7 +20,6 @@
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
......@@ -36,7 +35,6 @@
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/bitops.h>
#include "ext4_jbd2.h"
......@@ -140,7 +138,7 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
/*
* Test whether an inode is a fast symlink.
*/
static int ext4_inode_is_fast_symlink(struct inode *inode)
int ext4_inode_is_fast_symlink(struct inode *inode)
{
int ea_blocks = EXT4_I(inode)->i_file_acl ?
EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
......@@ -887,6 +885,95 @@ int do_journal_get_write_access(handle_t *handle,
static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block)
{
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
unsigned to = from + len;
struct inode *inode = page->mapping->host;
unsigned block_start, block_end;
sector_t block;
int err = 0;
unsigned blocksize = inode->i_sb->s_blocksize;
unsigned bbits;
struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
bool decrypt = false;
BUG_ON(!PageLocked(page));
BUG_ON(from > PAGE_CACHE_SIZE);
BUG_ON(to > PAGE_CACHE_SIZE);
BUG_ON(from > to);
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
head = page_buffers(page);
bbits = ilog2(blocksize);
block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
for (bh = head, block_start = 0; bh != head || !block_start;
block++, block_start = block_end, bh = bh->b_this_page) {
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (PageUptodate(page)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
}
continue;
}
if (buffer_new(bh))
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
WARN_ON(bh->b_size != blocksize);
err = get_block(inode, block, bh, 1);
if (err)
break;
if (buffer_new(bh)) {
unmap_underlying_metadata(bh->b_bdev,
bh->b_blocknr);
if (PageUptodate(page)) {
clear_buffer_new(bh);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
continue;
}
if (block_end > to || block_start < from)
zero_user_segments(page, to, block_end,
block_start, from);
continue;
}
}
if (PageUptodate(page)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
continue;
}
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
ll_rw_block(READ, 1, &bh);
*wait_bh++ = bh;
decrypt = ext4_encrypted_inode(inode) &&
S_ISREG(inode->i_mode);
}
}
/*
* If we issued read requests, let them complete.
*/
while (wait_bh > wait) {
wait_on_buffer(*--wait_bh);
if (!buffer_uptodate(*wait_bh))
err = -EIO;
}
if (unlikely(err))
page_zero_new_buffers(page, from, to);
else if (decrypt)
err = ext4_decrypt_one(inode, page);
return err;
}
#endif
static int ext4_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
......@@ -949,11 +1036,19 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
if (ext4_should_dioread_nolock(inode))
ret = ext4_block_write_begin(page, pos, len,
ext4_get_block_write);
else
ret = ext4_block_write_begin(page, pos, len,
ext4_get_block);
#else
if (ext4_should_dioread_nolock(inode))
ret = __block_write_begin(page, pos, len, ext4_get_block_write);
else
ret = __block_write_begin(page, pos, len, ext4_get_block);
#endif
if (!ret && ext4_should_journal_data(inode)) {
ret = ext4_walk_page_buffers(handle, page_buffers(page),
from, to, NULL,
......@@ -2575,7 +2670,12 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
ret = ext4_block_write_begin(page, pos, len,
ext4_da_get_block_prep);
#else
ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
#endif
if (ret < 0) {
unlock_page(page);
ext4_journal_stop(handle);
......@@ -2821,7 +2921,7 @@ static int ext4_readpage(struct file *file, struct page *page)
ret = ext4_readpage_inline(inode, page);
if (ret == -EAGAIN)
return mpage_readpage(page, ext4_get_block);
return ext4_mpage_readpages(page->mapping, NULL, page, 1);
return ret;
}
......@@ -2836,7 +2936,7 @@ ext4_readpages(struct file *file, struct address_space *mapping,
if (ext4_has_inline_data(inode))
return 0;
return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
}
static void ext4_invalidatepage(struct page *page, unsigned int offset,
......@@ -3033,6 +3133,9 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
get_block_func = ext4_get_block_write;
dio_flags = DIO_LOCKING;
}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
#endif
if (IS_DAX(inode))
ret = dax_do_io(iocb, inode, iter, offset, get_block_func,
ext4_end_io_dio, dio_flags);
......@@ -3097,6 +3200,11 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
size_t count = iov_iter_count(iter);
ssize_t ret;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
return 0;
#endif
/*
* If we are doing data journalling we don't support O_DIRECT
*/
......@@ -3261,6 +3369,13 @@ static int __ext4_block_zero_page_range(handle_t *handle,
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
goto unlock;
if (S_ISREG(inode->i_mode) &&
ext4_encrypted_inode(inode)) {
/* We expect the key to be set. */
BUG_ON(!ext4_has_encryption_key(inode));
BUG_ON(blocksize != PAGE_CACHE_SIZE);
WARN_ON_ONCE(ext4_decrypt_one(inode, page));
}
}
if (ext4_should_journal_data(inode)) {
BUFFER_TRACE(bh, "get write access");
......@@ -4096,7 +4211,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
inode->i_op = &ext4_dir_inode_operations;
inode->i_fop = &ext4_dir_operations;
} else if (S_ISLNK(inode->i_mode)) {
if (ext4_inode_is_fast_symlink(inode)) {
if (ext4_inode_is_fast_symlink(inode) &&
!ext4_encrypted_inode(inode)) {
inode->i_op = &ext4_fast_symlink_inode_operations;
nd_terminate_link(ei->i_data, inode->i_size,
sizeof(ei->i_data) - 1);
......
......@@ -8,12 +8,12 @@
*/
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/capability.h>
#include <linux/time.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/random.h>
#include <asm/uaccess.h>
#include "ext4_jbd2.h"
#include "ext4.h"
......@@ -196,6 +196,16 @@ static long swap_inode_boot_loader(struct super_block *sb,
return err;
}
static int uuid_is_zero(__u8 u[16])
{
int i;
for (i = 0; i < 16; i++)
if (u[i])
return 0;
return 1;
}
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(filp);
......@@ -615,7 +625,78 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
case EXT4_IOC_PRECACHE_EXTENTS:
return ext4_ext_precache(inode);
case EXT4_IOC_SET_ENCRYPTION_POLICY: {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
struct ext4_encryption_policy policy;
int err = 0;
if (copy_from_user(&policy,
(struct ext4_encryption_policy __user *)arg,
sizeof(policy))) {
err = -EFAULT;
goto encryption_policy_out;
}
err = ext4_process_policy(&policy, inode);
encryption_policy_out:
return err;
#else
return -EOPNOTSUPP;
#endif
}
case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
int err, err2;
struct ext4_sb_info *sbi = EXT4_SB(sb);
handle_t *handle;
if (!ext4_sb_has_crypto(sb))
return -EOPNOTSUPP;
if (uuid_is_zero(sbi->s_es->s_encrypt_pw_salt)) {
err = mnt_want_write_file(filp);
if (err)
return err;
handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
goto pwsalt_err_exit;
}
err = ext4_journal_get_write_access(handle, sbi->s_sbh);
if (err)
goto pwsalt_err_journal;
generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
err = ext4_handle_dirty_metadata(handle, NULL,
sbi->s_sbh);
pwsalt_err_journal:
err2 = ext4_journal_stop(handle);
if (err2 && !err)
err = err2;
pwsalt_err_exit:
mnt_drop_write_file(filp);
if (err)
return err;
}
if (copy_to_user((void *) arg, sbi->s_es->s_encrypt_pw_salt,
16))
return -EFAULT;
return 0;
}
case EXT4_IOC_GET_ENCRYPTION_POLICY: {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
struct ext4_encryption_policy policy;
int err = 0;
if (!ext4_encrypted_inode(inode))
return -ENOENT;
err = ext4_get_policy(inode, &policy);
if (err)
return err;
if (copy_to_user((void *)arg, &policy, sizeof(policy)))
return -EFAULT;
return 0;
#else
return -EOPNOTSUPP;
#endif
}
default:
return -ENOTTY;
}
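From userspace, the new ioctls above look as follows; here is a minimal sketch of setting a policy on an empty directory. The ioctl codepoint ('f', 19), the mode values (1 = AES-256-XTS for contents, 4 = AES-256-CTS for filenames) and policy version 0 are assumptions taken from this series' headers:
/* Sketch: mark a directory tree for encryption. */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct ext4_encryption_policy {		/* as defined in ext4_crypto.h */
	char version;
	char contents_encryption_mode;
	char filenames_encryption_mode;
	char master_key_descriptor[8];
} __attribute__((__packed__));

#define EXT4_IOC_SET_ENCRYPTION_POLICY \
	_IOR('f', 19, struct ext4_encryption_policy)	/* assumed codepoint */

int set_policy(const char *dir, const char descriptor[8])
{
	struct ext4_encryption_policy policy;
	int fd, ret;

	memset(&policy, 0, sizeof(policy));
	policy.version = 0;			/* assumed current version */
	policy.contents_encryption_mode = 1;	/* AES-256-XTS (assumed) */
	policy.filenames_encryption_mode = 4;	/* AES-256-CTS (assumed) */
	memcpy(policy.master_key_descriptor, descriptor, 8);

	fd = open(dir, O_RDONLY);
	if (fd < 0) {
		perror(dir);
		return -1;
	}
	ret = ioctl(fd, EXT4_IOC_SET_ENCRYPTION_POLICY, &policy);
	if (ret)
		perror("EXT4_IOC_SET_ENCRYPTION_POLICY");
	close(fd);
	return ret;
}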
......@@ -680,6 +761,9 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case FITRIM:
case EXT4_IOC_RESIZE_FS:
case EXT4_IOC_PRECACHE_EXTENTS:
case EXT4_IOC_SET_ENCRYPTION_POLICY:
case EXT4_IOC_GET_ENCRYPTION_PWSALT:
case EXT4_IOC_GET_ENCRYPTION_POLICY:
break;
default:
return -ENOIOCTLCMD;
......
This diff has been collapsed.
......@@ -8,7 +8,6 @@
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
......@@ -24,7 +23,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/ratelimit.h>
#include "ext4_jbd2.h"
#include "xattr.h"
......@@ -68,6 +66,10 @@ static void ext4_finish_bio(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
struct page *data_page = NULL;
struct ext4_crypto_ctx *ctx = NULL;
#endif
struct buffer_head *bh, *head;
unsigned bio_start = bvec->bv_offset;
unsigned bio_end = bio_start + bvec->bv_len;
......@@ -77,6 +79,15 @@ static void ext4_finish_bio(struct bio *bio)
if (!page)
continue;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
if (!page->mapping) {
/* The bounce data pages are unmapped. */
data_page = page;
ctx = (struct ext4_crypto_ctx *)page_private(data_page);
page = ctx->control_page;
}
#endif
if (error) {
SetPageError(page);
set_bit(AS_EIO, &page->mapping->flags);
......@@ -101,8 +112,13 @@ static void ext4_finish_bio(struct bio *bio)
} while ((bh = bh->b_this_page) != head);
bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
local_irq_restore(flags);
if (!under_io)
if (!under_io) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
if (ctx)
ext4_restore_control_page(data_page);
#endif
end_page_writeback(page);
}
}
}
......@@ -377,6 +393,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
static int io_submit_add_bh(struct ext4_io_submit *io,
struct inode *inode,
struct page *page,
struct buffer_head *bh)
{
int ret;
......@@ -390,7 +407,7 @@ static int io_submit_add_bh(struct ext4_io_submit *io,
if (ret)
return ret;
}
ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
if (ret != bh->b_size)
goto submit_and_retry;
io->io_next_block++;
......@@ -403,6 +420,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
struct writeback_control *wbc,
bool keep_towrite)
{
struct page *data_page = NULL;
struct inode *inode = page->mapping->host;
unsigned block_start, blocksize;
struct buffer_head *bh, *head;
......@@ -462,19 +480,29 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
set_buffer_async_write(bh);
} while ((bh = bh->b_this_page) != head);
/* Now submit buffers to write */
bh = head = page_buffers(page);
if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
data_page = ext4_encrypt(inode, page);
if (IS_ERR(data_page)) {
ret = PTR_ERR(data_page);
data_page = NULL;
goto out;
}
}
/* Now submit buffers to write */
do {
if (!buffer_async_write(bh))
continue;
ret = io_submit_add_bh(io, inode, bh);
ret = io_submit_add_bh(io, inode,
data_page ? data_page : page, bh);
if (ret) {
/*
* We only get here on ENOMEM. Not much else
* we can do but mark the page as dirty, and
* better luck next time.
*/
redirty_page_for_writepage(wbc, page);
break;
}
nr_submitted++;
......@@ -483,6 +511,11 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
/* Error stopped previous loop? Clean up buffers... */
if (ret) {
out:
if (data_page)
ext4_restore_control_page(data_page);
printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
redirty_page_for_writepage(wbc, page);
do {
clear_buffer_async_write(bh);
bh = bh->b_this_page;
......
/*
* linux/fs/ext4/readpage.c
*
* Copyright (C) 2002, Linus Torvalds.
* Copyright (C) 2015, Google, Inc.
*
* This was originally taken from fs/mpage.c
*
* The ext4_mpage_readpages() function here is intended to replace
* mpage_readpages() in the general case, not just for encrypted
* files. It has some limitations (see below), where it will fall
* back to block_read_full_page(), but these limitations should only
* be hit when page_size != block_size.
*
* This will allow us to attach a callback function to support ext4
* encryption.
*
* If anything unusual happens, such as:
*
* - encountering a page which has buffers
* - encountering a page which has a non-hole after a hole
* - encountering a page with non-contiguous blocks
*
* then this code just gives up and calls the buffer_head-based read function.
* It does handle a page which has holes at the end - that is a common case:
* the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
*
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>
#include "ext4.h"
/*
* Call ext4_decrypt on every single page, reusing the encryption
* context.
*/
static void completion_pages(struct work_struct *work)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
struct ext4_crypto_ctx *ctx =
container_of(work, struct ext4_crypto_ctx, work);
struct bio *bio = ctx->bio;
struct bio_vec *bv;
int i;
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
int ret = ext4_decrypt(ctx, page);
if (ret) {
WARN_ON_ONCE(1);
SetPageError(page);
} else
SetPageUptodate(page);
unlock_page(page);
}
ext4_release_crypto_ctx(ctx);
bio_put(bio);
#else
BUG();
#endif
}
static inline bool ext4_bio_encrypted(struct bio *bio)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
return unlikely(bio->bi_private != NULL);
#else
return false;
#endif
}
/*
* I/O completion handler for multipage BIOs.
*
* The mpage code never puts partial pages into a BIO (except for end-of-file).
* If a page does not map to a contiguous run of blocks then it simply falls
* back to block_read_full_page().
*
* Why is this? If a page's completion depends on a number of different BIOs
* which can complete in any order (or at the same time) then determining the
* status of that page is hard. See end_buffer_async_read() for the details.
* There is no point in duplicating all that complexity.
*/
static void mpage_end_io(struct bio *bio, int err)
{
struct bio_vec *bv;
int i;
if (ext4_bio_encrypted(bio)) {
struct ext4_crypto_ctx *ctx = bio->bi_private;
if (err) {
ext4_release_crypto_ctx(ctx);
} else {
INIT_WORK(&ctx->work, completion_pages);
ctx->bio = bio;
queue_work(ext4_read_workqueue, &ctx->work);
return;
}
}
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
if (!err) {
SetPageUptodate(page);
} else {
ClearPageUptodate(page);
SetPageError(page);
}
unlock_page(page);
}
bio_put(bio);
}
int ext4_mpage_readpages(struct address_space *mapping,
struct list_head *pages, struct page *page,
unsigned nr_pages)
{
struct bio *bio = NULL;
unsigned page_idx;
sector_t last_block_in_bio = 0;
struct inode *inode = mapping->host;
const unsigned blkbits = inode->i_blkbits;
const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
const unsigned blocksize = 1 << blkbits;
sector_t block_in_file;
sector_t last_block;
sector_t last_block_in_file;
sector_t blocks[MAX_BUF_PER_PAGE];
unsigned page_block;
struct block_device *bdev = inode->i_sb->s_bdev;
int length;
unsigned relative_block = 0;
struct ext4_map_blocks map;
map.m_pblk = 0;
map.m_lblk = 0;
map.m_len = 0;
map.m_flags = 0;
for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
int fully_mapped = 1;
unsigned first_hole = blocks_per_page;
prefetchw(&page->flags);
if (pages) {
page = list_entry(pages->prev, struct page, lru);
list_del(&page->lru);
if (add_to_page_cache_lru(page, mapping,
page->index, GFP_KERNEL))
goto next_page;
}
if (page_has_buffers(page))
goto confused;
block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
last_block = block_in_file + nr_pages * blocks_per_page;
last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
if (last_block > last_block_in_file)
last_block = last_block_in_file;
page_block = 0;
/*
* Map blocks using the previous result first.
*/
if ((map.m_flags & EXT4_MAP_MAPPED) &&
block_in_file > map.m_lblk &&
block_in_file < (map.m_lblk + map.m_len)) {
unsigned map_offset = block_in_file - map.m_lblk;
unsigned last = map.m_len - map_offset;
for (relative_block = 0; ; relative_block++) {
if (relative_block == last) {
/* needed? */
map.m_flags &= ~EXT4_MAP_MAPPED;
break;
}
if (page_block == blocks_per_page)
break;
blocks[page_block] = map.m_pblk + map_offset +
relative_block;
page_block++;
block_in_file++;
}
}
/*
* Then do more ext4_map_blocks() calls until we are
* done with this page.
*/
while (page_block < blocks_per_page) {
if (block_in_file < last_block) {
map.m_lblk = block_in_file;
map.m_len = last_block - block_in_file;
if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
set_error_page:
SetPageError(page);
zero_user_segment(page, 0,
PAGE_CACHE_SIZE);
unlock_page(page);
goto next_page;
}
}
if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
fully_mapped = 0;
if (first_hole == blocks_per_page)
first_hole = page_block;
page_block++;
block_in_file++;
continue;
}
if (first_hole != blocks_per_page)
goto confused; /* hole -> non-hole */
/* Contiguous blocks? */
if (page_block && blocks[page_block-1] != map.m_pblk-1)
goto confused;
for (relative_block = 0; ; relative_block++) {
if (relative_block == map.m_len) {
/* needed? */
map.m_flags &= ~EXT4_MAP_MAPPED;
break;
} else if (page_block == blocks_per_page)
break;
blocks[page_block] = map.m_pblk+relative_block;
page_block++;
block_in_file++;
}
}
if (first_hole != blocks_per_page) {
zero_user_segment(page, first_hole << blkbits,
PAGE_CACHE_SIZE);
if (first_hole == 0) {
SetPageUptodate(page);
unlock_page(page);
goto next_page;
}
} else if (fully_mapped) {
SetPageMappedToDisk(page);
}
if (fully_mapped && blocks_per_page == 1 &&
!PageUptodate(page) && cleancache_get_page(page) == 0) {
SetPageUptodate(page);
goto confused;
}
/*
* This page will go to BIO. Do we need to send this
* BIO off first?
*/
if (bio && (last_block_in_bio != blocks[0] - 1)) {
submit_and_realloc:
submit_bio(READ, bio);
bio = NULL;
}
if (bio == NULL) {
struct ext4_crypto_ctx *ctx = NULL;
if (ext4_encrypted_inode(inode) &&
S_ISREG(inode->i_mode)) {
ctx = ext4_get_crypto_ctx(inode);
if (IS_ERR(ctx))
goto set_error_page;
}
bio = bio_alloc(GFP_KERNEL,
min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
if (!bio) {
if (ctx)
ext4_release_crypto_ctx(ctx);
goto set_error_page;
}
bio->bi_bdev = bdev;
bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
bio->bi_end_io = mpage_end_io;
bio->bi_private = ctx;
}
length = first_hole << blkbits;
if (bio_add_page(bio, page, length, 0) < length)
goto submit_and_realloc;
if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
(relative_block == map.m_len)) ||
(first_hole != blocks_per_page)) {
submit_bio(READ, bio);
bio = NULL;
} else
last_block_in_bio = blocks[blocks_per_page - 1];
goto next_page;
confused:
if (bio) {
submit_bio(READ, bio);
bio = NULL;
}
if (!PageUptodate(page))
block_read_full_page(page, ext4_get_block);
else
unlock_page(page);
next_page:
if (pages)
page_cache_release(page);
}
BUG_ON(pages && !list_empty(pages));
if (bio)
submit_bio(READ, bio);
return 0;
}
......@@ -21,7 +21,6 @@
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/jbd2.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
......@@ -323,22 +322,6 @@ static void save_error_info(struct super_block *sb, const char *func,
ext4_commit_super(sb, 1);
}
/*
* The del_gendisk() function uninitializes the disk-specific data
* structures, including the bdi structure, without telling anyone
* else. Once this happens, any attempt to call mark_buffer_dirty()
* (for example, by ext4_commit_super), will cause a kernel OOPS.
* This is a kludge to prevent these oops until we can put in a proper
* hook in del_gendisk() to inform the VFS and file system layers.
*/
static int block_device_ejected(struct super_block *sb)
{
struct inode *bd_inode = sb->s_bdev->bd_inode;
struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
return bdi->dev == NULL;
}
static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
{
struct super_block *sb = journal->j_private;
......@@ -893,6 +876,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
atomic_set(&ei->i_ioend_count, 0);
atomic_set(&ei->i_unwritten, 0);
INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
ei->i_encryption_key.mode = EXT4_ENCRYPTION_MODE_INVALID;
#endif
return &ei->vfs_inode;
}
......@@ -1120,7 +1106,7 @@ enum {
Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
Opt_data_err_abort, Opt_data_err_ignore,
Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
......@@ -1211,6 +1197,7 @@ static const match_table_t tokens = {
{Opt_init_itable, "init_itable"},
{Opt_noinit_itable, "noinit_itable"},
{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
{Opt_test_dummy_encryption, "test_dummy_encryption"},
{Opt_removed, "check=none"}, /* mount option from ext2/3 */
{Opt_removed, "nocheck"}, /* mount option from ext2/3 */
{Opt_removed, "reservation"}, /* mount option from ext2/3 */
......@@ -1412,6 +1399,7 @@ static const struct mount_opts {
{Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
{Opt_max_dir_size_kb, 0, MOPT_GTE0},
{Opt_test_dummy_encryption, 0, MOPT_GTE0},
{Opt_err, 0, 0}
};
......@@ -1588,6 +1576,15 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
}
*journal_ioprio =
IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, arg);
} else if (token == Opt_test_dummy_encryption) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
sbi->s_mount_flags |= EXT4_MF_TEST_DUMMY_ENCRYPTION;
ext4_msg(sb, KERN_WARNING,
"Test dummy encryption mode enabled");
#else
ext4_msg(sb, KERN_WARNING,
"Test dummy encryption mount option ignored");
#endif
} else if (m->flags & MOPT_DATAJ) {
if (is_remount) {
if (!sbi->s_journal)
......@@ -2685,11 +2682,13 @@ static struct attribute *ext4_attrs[] = {
EXT4_INFO_ATTR(lazy_itable_init);
EXT4_INFO_ATTR(batched_discard);
EXT4_INFO_ATTR(meta_bg_resize);
EXT4_INFO_ATTR(encryption);
static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(lazy_itable_init),
ATTR_LIST(batched_discard),
ATTR_LIST(meta_bg_resize),
ATTR_LIST(encryption),
NULL,
};
......@@ -3448,6 +3447,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (sb->s_bdev->bd_part)
sbi->s_sectors_written_start =
part_stat_read(sb->s_bdev->bd_part, sectors[1]);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
/* Modes of operations for file and directory encryption. */
sbi->s_file_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS;
sbi->s_dir_encryption_mode = EXT4_ENCRYPTION_MODE_INVALID;
#endif
/* Cleanup superblock name */
for (cp = sb->s_id; (cp = strchr(cp, '/'));)
......@@ -3692,6 +3696,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
}
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT) &&
es->s_encryption_level) {
ext4_msg(sb, KERN_ERR, "Unsupported encryption level %d",
es->s_encryption_level);
goto failed_mount;
}
if (sb->s_blocksize != blocksize) {
/* Validate the filesystem blocksize */
if (!sb_set_blocksize(sb, blocksize)) {
......@@ -4054,6 +4065,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
}
if (unlikely(sbi->s_mount_flags & EXT4_MF_TEST_DUMMY_ENCRYPTION) &&
!(sb->s_flags & MS_RDONLY) &&
!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT)) {
EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_ENCRYPT);
ext4_commit_super(sb, 1);
}
/*
* Get the # of file system overhead blocks from the
* superblock if present.
......@@ -4570,7 +4588,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
int error = 0;
if (!sbh || block_device_ejected(sb))
if (!sbh)
return error;
if (buffer_write_io_error(sbh)) {
/*
......
......@@ -18,12 +18,100 @@
*/
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/namei.h>
#include "ext4.h"
#include "xattr.h"
#ifdef CONFIG_EXT4_FS_ENCRYPTION
static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct page *cpage = NULL;
char *caddr, *paddr = NULL;
struct ext4_str cstr, pstr;
struct inode *inode = dentry->d_inode;
struct ext4_fname_crypto_ctx *ctx = NULL;
struct ext4_encrypted_symlink_data *sd;
loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
int res;
u32 plen, max_size = inode->i_sb->s_blocksize;
if (!ext4_encrypted_inode(inode))
return page_follow_link_light(dentry, nd);
ctx = ext4_get_fname_crypto_ctx(inode, inode->i_sb->s_blocksize);
if (IS_ERR(ctx))
return ctx;
if (ext4_inode_is_fast_symlink(inode)) {
caddr = (char *) EXT4_I(dentry->d_inode)->i_data;
max_size = sizeof(EXT4_I(dentry->d_inode)->i_data);
} else {
cpage = read_mapping_page(inode->i_mapping, 0, NULL);
if (IS_ERR(cpage)) {
ext4_put_fname_crypto_ctx(&ctx);
return cpage;
}
caddr = kmap(cpage);
caddr[size] = 0;
}
/* Symlink is encrypted */
sd = (struct ext4_encrypted_symlink_data *)caddr;
cstr.name = sd->encrypted_path;
cstr.len = le16_to_cpu(sd->len);
if ((cstr.len +
sizeof(struct ext4_encrypted_symlink_data) - 1) >
max_size) {
/* Symlink data on the disk is corrupted */
res = -EIO;
goto errout;
}
plen = (cstr.len < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2) ?
EXT4_FNAME_CRYPTO_DIGEST_SIZE*2 : cstr.len;
paddr = kmalloc(plen + 1, GFP_NOFS);
if (!paddr) {
res = -ENOMEM;
goto errout;
}
pstr.name = paddr;
res = _ext4_fname_disk_to_usr(ctx, &cstr, &pstr);
if (res < 0)
goto errout;
/* Null-terminate the name */
if (res <= plen)
paddr[res] = '\0';
nd_set_link(nd, paddr);
ext4_put_fname_crypto_ctx(&ctx);
if (cpage) {
kunmap(cpage);
page_cache_release(cpage);
}
return NULL;
errout:
ext4_put_fname_crypto_ctx(&ctx);
if (cpage) {
kunmap(cpage);
page_cache_release(cpage);
}
kfree(paddr);
return ERR_PTR(res);
}
static void ext4_put_link(struct dentry *dentry, struct nameidata *nd,
void *cookie)
{
struct page *page = cookie;
if (!page) {
kfree(nd_get_link(nd));
} else {
kunmap(page);
page_cache_release(page);
}
}
#endif
static void *ext4_follow_fast_link(struct dentry *dentry, struct nameidata *nd)
{
struct ext4_inode_info *ei = EXT4_I(dentry->d_inode);
nd_set_link(nd, (char *) ei->i_data);
......@@ -32,8 +120,13 @@ static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
const struct inode_operations ext4_symlink_inode_operations = {
.readlink = generic_readlink,
#ifdef CONFIG_EXT4_FS_ENCRYPTION
.follow_link = ext4_follow_link,
.put_link = ext4_put_link,
#else
.follow_link = page_follow_link_light,
.put_link = page_put_link,
#endif
.setattr = ext4_setattr,
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
......@@ -43,7 +136,7 @@ const struct inode_operations ext4_symlink_inode_operations = {
const struct inode_operations ext4_fast_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = ext4_follow_link,
.follow_link = ext4_follow_fast_link,
.setattr = ext4_setattr,
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
......
......@@ -55,7 +55,6 @@
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
......@@ -639,8 +638,7 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
free += EXT4_XATTR_LEN(name_len);
}
if (i->value) {
if (free < EXT4_XATTR_SIZE(i->value_len) ||
free < EXT4_XATTR_LEN(name_len) +
if (free < EXT4_XATTR_LEN(name_len) +
EXT4_XATTR_SIZE(i->value_len))
return -ENOSPC;
}
......
......@@ -23,6 +23,7 @@
#define EXT4_XATTR_INDEX_SECURITY 6
#define EXT4_XATTR_INDEX_SYSTEM 7
#define EXT4_XATTR_INDEX_RICHACL 8
#define EXT4_XATTR_INDEX_ENCRYPTION 9
struct ext4_xattr_header {
__le32 h_magic; /* magic number for identification */
......@@ -98,6 +99,8 @@ extern const struct xattr_handler ext4_xattr_user_handler;
extern const struct xattr_handler ext4_xattr_trusted_handler;
extern const struct xattr_handler ext4_xattr_security_handler;
#define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c"
extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);
......
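The new index/name pair ("c" under index 9) is where the per-inode struct ext4_encryption_context is persisted. A sketch of the read side, under the assumption that crypto_policy.c retrieves the context roughly like this:
/* Sketch (assumed shape of the context lookup in crypto_policy.c). */
static int example_get_context(struct inode *inode,
			       struct ext4_encryption_context *ctx)
{
	int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
				 ctx, sizeof(*ctx));
	if (res < 0)
		return res;		/* typically -ENODATA if unset */
	if (res != sizeof(*ctx))
		return -EINVAL;		/* truncated or oversized context */
	if (ctx->format != EXT4_ENCRYPTION_CONTEXT_FORMAT_V1)
		return -EINVAL;
	return 0;
}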