Commit 0e855ac8 authored by Aneesh Kumar K.V, committed by Theodore Ts'o

ext4: Convert truncate_mutex to read write semaphore.

We are currently taking the truncate_mutex for every read. This has a
performance impact on large CPU configurations. Convert the lock to a
read-write semaphore and take the read lock when we are trying to read the file.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Parent: c278bfec
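
The locking pattern the patch introduces is: callers that only look up existing block mappings take EXT4_I(inode)->i_data_sem shared, while callers that may allocate blocks (create != 0), truncate, or resize the reservation window take it exclusively. Below is a minimal user-space sketch of that reader/writer pattern using POSIX rwlocks; it is an illustration only, not kernel code, and the names fake_inode and map_block are invented for the example.

/* Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

struct fake_inode {
        pthread_rwlock_t i_data_sem;    /* stand-in for EXT4_I(inode)->i_data_sem */
        long nr_blocks;                 /* pretend size of the block map */
};

/*
 * Map a logical block to a (fake) physical block.  Pure lookups take the
 * lock shared; create != 0 may grow the map, so it takes the lock exclusive.
 */
static long map_block(struct fake_inode *inode, long lblk, int create)
{
        long pblk;

        if (create)
                pthread_rwlock_wrlock(&inode->i_data_sem);
        else
                pthread_rwlock_rdlock(&inode->i_data_sem);

        if (create && lblk >= inode->nr_blocks)
                inode->nr_blocks = lblk + 1;            /* "allocate" */
        pblk = (lblk < inode->nr_blocks) ? 1000 + lblk : -1;

        pthread_rwlock_unlock(&inode->i_data_sem);
        return pblk;
}

int main(void)
{
        struct fake_inode inode = { .nr_blocks = 4 };

        pthread_rwlock_init(&inode.i_data_sem, NULL);
        printf("read  lblk 2 -> %ld\n", map_block(&inode, 2, 0));  /* shared    */
        printf("write lblk 8 -> %ld\n", map_block(&inode, 8, 1));  /* exclusive */
        pthread_rwlock_destroy(&inode.i_data_sem);
        return 0;
}

With this split, many concurrent read-only map_block() lookups can proceed in parallel; only allocation and truncate serialize against each other, which is the scalability gain the commit message refers to.
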
@@ -526,7 +526,7 @@ static inline int rsv_is_empty(struct ext4_reserve_window *rsv)
  * when setting the reservation window size through ioctl before the file
  * is open for write (needs block allocation).
  *
- * Needs truncate_mutex protection prior to call this function.
+ * Needs down_write(i_data_sem) protection prior to call this function.
  */
 void ext4_init_block_alloc_info(struct inode *inode)
 {
......
@@ -1565,7 +1565,7 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
  * This routine returns max. credits that the extent tree can consume.
  * It should be OK for low-performance paths like ->writepage()
  * To allow many writing processes to fit into a single transaction,
- * the caller should calculate credits under truncate_mutex and
+ * the caller should calculate credits under i_data_sem and
  * pass the actual path.
  */
 int ext4_ext_calc_credits_for_insert(struct inode *inode,
@@ -2131,7 +2131,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 
 /*
  * Need to be called with
- * mutex_lock(&EXT4_I(inode)->truncate_mutex);
+ * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
+ * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
  */
 int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
                         ext4_lblk_t iblock,
@@ -2350,7 +2351,7 @@ void ext4_ext_truncate(struct inode * inode, struct page *page)
         if (page)
                 ext4_block_truncate_page(handle, page, mapping, inode->i_size);
 
-        mutex_lock(&EXT4_I(inode)->truncate_mutex);
+        down_write(&EXT4_I(inode)->i_data_sem);
         ext4_ext_invalidate_cache(inode);
 
         /*
@@ -2386,7 +2387,7 @@ void ext4_ext_truncate(struct inode * inode, struct page *page)
         if (inode->i_nlink)
                 ext4_orphan_del(handle, inode);
 
-        mutex_unlock(&EXT4_I(inode)->truncate_mutex);
+        up_write(&EXT4_I(inode)->i_data_sem);
         ext4_journal_stop(handle);
 }
 
@@ -2450,7 +2451,7 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
          * modify 1 super block, 1 block bitmap and 1 group descriptor.
          */
         credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + 3;
-        mutex_lock(&EXT4_I(inode)->truncate_mutex);
+        down_write((&EXT4_I(inode)->i_data_sem));
 retry:
         while (ret >= 0 && ret < max_blocks) {
                 block = block + ret;
@@ -2507,7 +2508,7 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
                 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                         goto retry;
 
-        mutex_unlock(&EXT4_I(inode)->truncate_mutex);
+        up_write((&EXT4_I(inode)->i_data_sem));
         /*
          * Time to update the file size.
          * Update only when preallocation was requested beyond the file size.
......
@@ -37,9 +37,9 @@ static int ext4_release_file (struct inode * inode, struct file * filp)
         if ((filp->f_mode & FMODE_WRITE) &&
                         (atomic_read(&inode->i_writecount) == 1))
         {
-                mutex_lock(&EXT4_I(inode)->truncate_mutex);
+                down_write(&EXT4_I(inode)->i_data_sem);
                 ext4_discard_reservation(inode);
-                mutex_unlock(&EXT4_I(inode)->truncate_mutex);
+                up_write(&EXT4_I(inode)->i_data_sem);
         }
         if (is_dx(inode) && filp->private_data)
                 ext4_htree_free_dir_info(filp->private_data);
......
@@ -308,7 +308,7 @@ static int ext4_block_to_path(struct inode *inode,
                 final = ptrs;
         } else {
                 ext4_warning(inode->i_sb, "ext4_block_to_path",
-                                "block %u > max",
+                                "block %lu > max",
                                 i_block + direct_blocks +
                                 indirect_blocks + double_blocks);
         }
@@ -345,7 +345,7 @@ static int ext4_block_to_path(struct inode *inode,
  *	the whole chain, all way to the data (returns %NULL, *err == 0).
  *
  * Need to be called with
- *	mutex_lock(&EXT4_I(inode)->truncate_mutex)
+ *	down_read(&EXT4_I(inode)->i_data_sem)
  */
 static Indirect *ext4_get_branch(struct inode *inode, int depth,
                                  ext4_lblk_t *offsets,
@@ -777,7 +777,8 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
  *
  *
  * Need to be called with
- * mutex_lock(&EXT4_I(inode)->truncate_mutex)
+ * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
+ * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
  */
 int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
                 ext4_lblk_t iblock, unsigned long maxblocks,
@@ -865,7 +866,7 @@ int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
         err = ext4_splice_branch(handle, inode, iblock,
                                 partial, indirect_blks, count);
         /*
-         * i_disksize growing is protected by truncate_mutex. Don't forget to
+         * i_disksize growing is protected by i_data_sem. Don't forget to
          * protect it if you're about to implement concurrent
          * ext4_get_block() -bzzz
          */
@@ -895,6 +896,31 @@ int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
 
 #define DIO_CREDITS (EXT4_RESERVE_TRANS_BLOCKS + 32)
 
+int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
+                        unsigned long max_blocks, struct buffer_head *bh,
+                        int create, int extend_disksize)
+{
+        int retval;
+        if (create) {
+                down_write((&EXT4_I(inode)->i_data_sem));
+        } else {
+                down_read((&EXT4_I(inode)->i_data_sem));
+        }
+        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
+                retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
+                                bh, create, extend_disksize);
+        } else {
+                retval = ext4_get_blocks_handle(handle, inode, block,
+                                max_blocks, bh, create, extend_disksize);
+        }
+        if (create) {
+                up_write((&EXT4_I(inode)->i_data_sem));
+        } else {
+                up_read((&EXT4_I(inode)->i_data_sem));
+        }
+        return retval;
+}
+
 static int ext4_get_block(struct inode *inode, sector_t iblock,
                 struct buffer_head *bh_result, int create)
 {
@@ -1399,7 +1425,7 @@ static int jbd2_journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
  * ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
  *
  * Same applies to ext4_get_block().  We will deadlock on various things like
- * lock_journal and i_truncate_mutex.
+ * lock_journal and i_data_sem
  *
  * Setting PF_MEMALLOC here doesn't work - too many internal memory
  * allocations fail.
@@ -2325,7 +2351,7 @@ void ext4_truncate(struct inode *inode)
          * From here we block out all ext4_get_block() callers who want to
          * modify the block allocation tree.
          */
-        mutex_lock(&ei->truncate_mutex);
+        down_write(&ei->i_data_sem);
 
         if (n == 1) {           /* direct blocks */
                 ext4_free_data(handle, inode, NULL, i_data+offsets[0],
@@ -2389,7 +2415,7 @@ void ext4_truncate(struct inode *inode)
 
         ext4_discard_reservation(inode);
 
-        mutex_unlock(&ei->truncate_mutex);
+        up_write(&ei->i_data_sem);
         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
         ext4_mark_inode_dirty(handle, inode);
......
@@ -199,7 +199,7 @@ int ext4_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
                  * need to allocate reservation structure for this inode
                  * before set the window size
                  */
-                mutex_lock(&ei->truncate_mutex);
+                down_write(&ei->i_data_sem);
                 if (!ei->i_block_alloc_info)
                         ext4_init_block_alloc_info(inode);
 
@@ -207,7 +207,7 @@ int ext4_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
                         struct ext4_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
                         rsv->rsv_goal_size = rsv_window_size;
                 }
-                mutex_unlock(&ei->truncate_mutex);
+                up_write(&ei->i_data_sem);
                 return 0;
         }
         case EXT4_IOC_GROUP_EXTEND: {
......
@@ -593,7 +593,7 @@ static void init_once(struct kmem_cache *cachep, void *foo)
 #ifdef CONFIG_EXT4DEV_FS_XATTR
         init_rwsem(&ei->xattr_sem);
 #endif
-        mutex_init(&ei->truncate_mutex);
+        init_rwsem(&ei->i_data_sem);
         inode_init_once(&ei->vfs_inode);
 }
 
......
@@ -1107,27 +1107,10 @@ extern void ext4_ext_init(struct super_block *);
 extern void ext4_ext_release(struct super_block *);
 extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
                           loff_t len);
-
-static inline int
-ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
-                        unsigned long max_blocks, struct buffer_head *bh,
-                        int create, int extend_disksize)
-{
-        int retval;
-        mutex_lock(&EXT4_I(inode)->truncate_mutex);
-        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
-                retval = ext4_ext_get_blocks(handle, inode,
-                                (ext4_lblk_t)block, max_blocks,
-                                bh, create, extend_disksize);
-        } else {
-                retval = ext4_get_blocks_handle(handle, inode,
-                                (ext4_lblk_t)block, max_blocks,
-                                bh, create, extend_disksize);
-        }
-        mutex_unlock(&EXT4_I(inode)->truncate_mutex);
-        return retval;
-}
-
+extern int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode,
+                        sector_t block, unsigned long max_blocks,
+                        struct buffer_head *bh, int create,
+                        int extend_disksize);
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_EXT4_FS_H */
@@ -139,16 +139,16 @@ struct ext4_inode_info {
         __u16 i_extra_isize;
 
         /*
-         * truncate_mutex is for serialising ext4_truncate() against
+         * i_data_sem is for serialising ext4_truncate() against
          * ext4_getblock(). In the 2.4 ext2 design, great chunks of inode's
          * data tree are chopped off during truncate. We can't do that in
          * ext4 because whenever we perform intermediate commits during
          * truncate, the inode and all the metadata blocks *must* be in a
          * consistent state which allows truncation of the orphans to restart
         * during recovery.  Hence we must fix the get_block-vs-truncate race
-         * by other means, so we have truncate_mutex.
+         * by other means, so we have i_data_sem.
          */
-        struct mutex truncate_mutex;
+        struct rw_semaphore i_data_sem;
         struct inode vfs_inode;
         unsigned long i_ext_generation;
......