Commit e3cf5d5d authored by Theodore Ts'o

ext4: prepare to drop EXT4_STATE_DELALLOC_RESERVED

The EXT4_STATE_DELALLOC_RESERVED flag was originally implemented
because it was too hard to make sure the mballoc and get_block flags
could be reliably passed down through all of the codepaths that end up
calling ext4_mb_new_blocks().

Since then, we have mb_flags passed down through most of the code
paths, so getting rid of EXT4_STATE_DELALLOC_RESERVED isn't as tricky
as it used to be.

This commit plumbs in the last of what is required, and then adds a
WARN_ON check to make sure we haven't missed anything.  If this passes
a full regression test run, we can then drop
EXT4_STATE_DELALLOC_RESERVED.
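
The pattern is small enough to show in isolation. Below is a minimal, standalone C sketch (not kernel code) of what the series converges on: each caller translates the get_blocks-level flag into the mballoc-level flag, and the allocator warns when the per-inode state bit is set but the flag never arrived. The flag values and helper names are illustrative stand-ins, not the kernel's definitions.

    /*
     * Standalone sketch of the flag-plumbing pattern this commit completes.
     * Values below are stand-ins, not the kernel's actual definitions.
     */
    #include <stdio.h>

    #define EXT4_GET_BLOCKS_DELALLOC_RESERVE  0x0004  /* stand-in value */
    #define EXT4_MB_DELALLOC_RESERVED         0x2000  /* stand-in value */

    /* Models ext4_mb_new_blocks(): trust mb_flags, warn on any mismatch. */
    static void mb_new_blocks(int state_delalloc_reserved, unsigned int mb_flags)
    {
            if (state_delalloc_reserved &&
                (mb_flags & EXT4_MB_DELALLOC_RESERVED) == 0)
                    fprintf(stderr, "WARN: state bit set but flag not passed\n");
    }

    /* Models a caller such as ext4_ext_map_blocks() after this commit. */
    static void map_blocks(int state_delalloc_reserved, unsigned int gb_flags)
    {
            unsigned int mb_flags = 0;

            if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                    mb_flags |= EXT4_MB_DELALLOC_RESERVED;
            mb_new_blocks(state_delalloc_reserved, mb_flags);
    }

    int main(void)
    {
            map_blocks(1, EXT4_GET_BLOCKS_DELALLOC_RESERVE); /* silent */
            map_blocks(1, 0); /* would flag a code path the series missed */
            return 0;
    }

Once every caller sets the flag correctly, the WARN_ON never fires and the state bit carries no information the flag does not, which is what makes it safe to delete.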
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Reviewed-by: Jan Kara <jack@suse.cz>
Parent a5211002
fs/ext4/balloc.c

@@ -636,8 +636,7 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
 	 * Account for the allocated meta blocks.  We will never
 	 * fail EDQUOT for metdata, but we do account for it.
 	 */
-	if (!(*errp) &&
-	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
+	if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
 		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 		dquot_alloc_block_nofail(inode,
fs/ext4/extents.c

@@ -1933,6 +1933,8 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
 	ext4_lblk_t next;
 	int mb_flags = 0, unwritten;
 
+	if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+		mb_flags |= EXT4_MB_DELALLOC_RESERVED;
 	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
 		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
 		return -EIO;
@@ -2054,7 +2056,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
 	 * We're gonna add a new leaf in the tree.
 	 */
 	if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
-		mb_flags = EXT4_MB_USE_RESERVED;
+		mb_flags |= EXT4_MB_USE_RESERVED;
 	err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
 				       ppath, newext);
 	if (err)
@@ -4438,6 +4440,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	ar.flags = 0;
 	if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
 		ar.flags |= EXT4_MB_HINT_NOPREALLOC;
+	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
 	newblock = ext4_mb_new_blocks(handle, &ar, &err);
 	if (!newblock)
 		goto out2;
fs/ext4/indirect.c

@@ -333,7 +333,9 @@ static int ext4_alloc_branch(handle_t *handle,
 			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
 		} else
 			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
-					ar->inode, ar->goal, 0, NULL, &err);
+					ar->inode, ar->goal,
+					ar->flags & EXT4_MB_DELALLOC_RESERVED,
+					NULL, &err);
 		if (err) {
 			i--;
 			goto failed;
@@ -572,6 +574,8 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 		ar.logical = map->m_lblk;
 		if (S_ISREG(inode->i_mode))
 			ar.flags = EXT4_MB_HINT_DATA;
+		if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+			ar.flags |= EXT4_MB_DELALLOC_RESERVED;
 
 		ar.goal = ext4_find_goal(inode, map->m_lblk, partial);
fs/ext4/mballoc.c

@@ -4415,9 +4415,12 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
 	 * EDQUOT check, as blocks and quotas have been already
 	 * reserved when data being copied into pagecache.
 	 */
-	if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
+	if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED)) {
+		WARN_ON((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0);
 		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
-	else {
+	}
+
+	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
 		/* Without delayed allocation we need to verify
 		 * there is enough free blocks to do block allocation
 		 * and verify allocation doesn't exceed the quota limits.
@@ -4528,8 +4531,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
 	if (inquota && ar->len < inquota)
 		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
 	if (!ar->len) {
-		if (!ext4_test_inode_state(ar->inode,
-					   EXT4_STATE_DELALLOC_RESERVED))
+		if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
 			/* release all the reserved blocks if non delalloc */
 			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
 						reserv_clstrs);
fs/ext4/xattr.c

@@ -899,14 +899,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
 				goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
 
-			/*
-			 * take i_data_sem because we will test
-			 * i_delalloc_reserved_flag in ext4_mb_new_blocks
-			 */
-			down_read(&EXT4_I(inode)->i_data_sem);
 			block = ext4_new_meta_blocks(handle, inode, goal, 0,
 						     NULL, &error);
-			up_read((&EXT4_I(inode)->i_data_sem));
 			if (error)
 				goto cleanup;