Commit 955ce5f5 authored by Aneesh Kumar K.V, committed by Theodore Ts'o

ext4: Convert ext4_lock_group to use sb_bgl_lock

We have sb_bgl_lock() and the ext4_group_info.bb_state
bit spinlock to protect group information. The latter is only
used within the mballoc code. Consolidate them to use sb_bgl_lock().
This makes the mballoc.c code much simpler and also avoids the
confusion of two locks protecting the same information.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Parent: eefd7f03
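The heart of the change is in the header: the per-group bit spinlock hidden in ext4_group_info.bb_state goes away, and the ext4_lock_group() family becomes a thin wrapper around the per-group spinlock already provided by the shared blockgroup lock table. For reference, the helpers as they read after this patch (copied from the header hunk below):

static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb,
					      ext4_group_t group)
{
	return bgl_lock_ptr(EXT4_SB(sb)->s_blockgroup_lock, group);
}

static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
{
	spin_lock(ext4_group_lock_ptr(sb, group));
}

static inline void ext4_unlock_group(struct super_block *sb,
					ext4_group_t group)
{
	spin_unlock(ext4_group_lock_ptr(sb, group));
}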
@@ -326,16 +326,16 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
 		unlock_buffer(bh);
 		return bh;
 	}
-	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
+	ext4_lock_group(sb, block_group);
 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 		ext4_init_block_bitmap(sb, bh, block_group, desc);
 		set_bitmap_uptodate(bh);
 		set_buffer_uptodate(bh);
-		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+		ext4_unlock_group(sb, block_group);
 		unlock_buffer(bh);
 		return bh;
 	}
-	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+	ext4_unlock_group(sb, block_group);
 	if (buffer_uptodate(bh)) {
 		/*
 		 * if not uninit if bh is uptodate,
@@ -451,7 +451,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
 	down_write(&grp->alloc_sem);
 	for (i = 0, blocks_freed = 0; i < count; i++) {
 		BUFFER_TRACE(bitmap_bh, "clear bit");
-		if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
+		if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
						bit + i, bitmap_bh->b_data)) {
 			ext4_error(sb, __func__,
 				   "bit already cleared for block %llu",
@@ -461,11 +461,11 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
 		blocks_freed++;
 		}
 	}
-	spin_lock(sb_bgl_lock(sbi, block_group));
+	ext4_lock_group(sb, block_group);
 	blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
 	ext4_free_blks_set(sb, desc, blk_free_count);
 	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
-	spin_unlock(sb_bgl_lock(sbi, block_group));
+	ext4_unlock_group(sb, block_group);
 	percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
 
 	if (sbi->s_log_groups_per_flex) {
@@ -963,12 +963,6 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
 		(ino >= EXT4_FIRST_INO(sb) &&
 		 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
 }
-
-static inline spinlock_t *
-sb_bgl_lock(struct ext4_sb_info *sbi, unsigned int block_group)
-{
-	return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
-}
 #else
 /* Assume that user mode programs are passing in an ext4fs superblock, not
  * a kernel struct super_block. This will allow us to call the feature-test
@@ -1568,33 +1562,31 @@ struct ext4_group_info {
 };
 
 #define EXT4_GROUP_INFO_NEED_INIT_BIT	0
-#define EXT4_GROUP_INFO_LOCKED_BIT	1
 
 #define EXT4_MB_GRP_NEED_INIT(grp)	\
 	(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
 
-static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
+static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb,
+					      ext4_group_t group)
 {
-	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
+	return bgl_lock_ptr(EXT4_SB(sb)->s_blockgroup_lock, group);
+}
 
-	bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
+static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
+{
+	spin_lock(ext4_group_lock_ptr(sb, group));
 }
 
 static inline void ext4_unlock_group(struct super_block *sb,
 					ext4_group_t group)
 {
-	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
-
-	bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
+	spin_unlock(ext4_group_lock_ptr(sb, group));
 }
 
 static inline int ext4_is_group_locked(struct super_block *sb,
 					ext4_group_t group)
 {
-	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
-
-	return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
-						&(grinfo->bb_state));
+	return spin_is_locked(ext4_group_lock_ptr(sb, group));
 }
 
 /*
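Two usage styles survive the consolidation, both visible in the hunks below: compound updates (free counts, checksums, uninit flags) bracket the critical section with ext4_lock_group()/ext4_unlock_group(), while single-bit flips hand the raw spinlock to the atomic bitmap helpers. A minimal sketch of the latter, with sb, bit, block_group, bitmap_bh, and ino as used in the inode-free hunk that follows:

	/* flip one bitmap bit under the same per-group spinlock */
	cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
					bit, bitmap_bh->b_data);
	if (!cleared)
		ext4_error(sb, "ext4_free_inode",
			   "bit already cleared for inode %lu", ino);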
@@ -122,16 +122,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 		unlock_buffer(bh);
 		return bh;
 	}
-	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
+	ext4_lock_group(sb, block_group);
 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
 		ext4_init_inode_bitmap(sb, bh, block_group, desc);
 		set_bitmap_uptodate(bh);
 		set_buffer_uptodate(bh);
-		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+		ext4_unlock_group(sb, block_group);
 		unlock_buffer(bh);
 		return bh;
 	}
-	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+	ext4_unlock_group(sb, block_group);
 	if (buffer_uptodate(bh)) {
 		/*
 		 * if not uninit if bh is uptodate,
@@ -246,9 +246,8 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 		goto error_return;
 
 	/* Ok, now we can actually update the inode bitmaps.. */
-	spin_lock(sb_bgl_lock(sbi, block_group));
-	cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
-	spin_unlock(sb_bgl_lock(sbi, block_group));
+	cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
+					bit, bitmap_bh->b_data);
 	if (!cleared)
 		ext4_error(sb, "ext4_free_inode",
 			   "bit already cleared for inode %lu", ino);
@@ -260,7 +259,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 	if (fatal) goto error_return;
 
 	if (gdp) {
-		spin_lock(sb_bgl_lock(sbi, block_group));
+		ext4_lock_group(sb, block_group);
 		count = ext4_free_inodes_count(sb, gdp) + 1;
 		ext4_free_inodes_set(sb, gdp, count);
 		if (is_directory) {
@@ -276,7 +275,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 		}
 		gdp->bg_checksum = ext4_group_desc_csum(sbi,
 							block_group, gdp);
-		spin_unlock(sb_bgl_lock(sbi, block_group));
+		ext4_unlock_group(sb, block_group);
 		percpu_counter_inc(&sbi->s_freeinodes_counter);
 		if (is_directory)
 			percpu_counter_dec(&sbi->s_dirs_counter);
@@ -707,10 +706,10 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
 
 /*
  * claim the inode from the inode bitmap. If the group
- * is uninit we need to take the groups's sb_bgl_lock
+ * is uninit we need to take the groups's ext4_group_lock
  * and clear the uninit flag. The inode bitmap update
  * and group desc uninit flag clear should be done
- * after holding sb_bgl_lock so that ext4_read_inode_bitmap
+ * after holding ext4_group_lock so that ext4_read_inode_bitmap
  * doesn't race with the ext4_claim_inode
  */
 static int ext4_claim_inode(struct super_block *sb,
@@ -721,7 +720,7 @@ static int ext4_claim_inode(struct super_block *sb,
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
 
-	spin_lock(sb_bgl_lock(sbi, group));
+	ext4_lock_group(sb, group);
 	if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
 		/* not a free inode */
 		retval = 1;
@@ -730,7 +729,7 @@ static int ext4_claim_inode(struct super_block *sb,
 	ino++;
 	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
 			ino > EXT4_INODES_PER_GROUP(sb)) {
-		spin_unlock(sb_bgl_lock(sbi, group));
+		ext4_unlock_group(sb, group);
 		ext4_error(sb, __func__,
 			   "reserved inode or inode > inodes count - "
 			   "block_group = %u, inode=%lu", group,
@@ -779,7 +778,7 @@ static int ext4_claim_inode(struct super_block *sb,
 	}
 	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
 err_ret:
-	spin_unlock(sb_bgl_lock(sbi, group));
+	ext4_unlock_group(sb, group);
 	return retval;
 }
@@ -935,7 +934,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
 		}
 
 		free = 0;
-		spin_lock(sb_bgl_lock(sbi, group));
+		ext4_lock_group(sb, group);
 		/* recheck and clear flag under lock if we still need to */
 		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 			free = ext4_free_blocks_after_init(sb, group, gdp);
@@ -944,7 +943,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
 			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
 								gdp);
 		}
-		spin_unlock(sb_bgl_lock(sbi, group));
+		ext4_unlock_group(sb, group);
 
 		/* Don't need to dirty bitmap block if we didn't change it */
 		if (free) {
@@ -372,24 +372,12 @@ static inline void mb_set_bit(int bit, void *addr)
 	ext4_set_bit(bit, addr);
 }
 
-static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
-{
-	addr = mb_correct_addr_and_bit(&bit, addr);
-	ext4_set_bit_atomic(lock, bit, addr);
-}
-
 static inline void mb_clear_bit(int bit, void *addr)
 {
 	addr = mb_correct_addr_and_bit(&bit, addr);
 	ext4_clear_bit(bit, addr);
 }
 
-static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
-{
-	addr = mb_correct_addr_and_bit(&bit, addr);
-	ext4_clear_bit_atomic(lock, bit, addr);
-}
-
 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
 {
 	int fix = 0, ret, tmpmax;
@@ -803,17 +791,17 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
 			unlock_buffer(bh[i]);
 			continue;
 		}
-		spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+		ext4_lock_group(sb, first_group + i);
 		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 			ext4_init_block_bitmap(sb, bh[i],
 						first_group + i, desc);
 			set_bitmap_uptodate(bh[i]);
 			set_buffer_uptodate(bh[i]);
-			spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+			ext4_unlock_group(sb, first_group + i);
 			unlock_buffer(bh[i]);
 			continue;
 		}
-		spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+		ext4_unlock_group(sb, first_group + i);
 		if (buffer_uptodate(bh[i])) {
 			/*
 			 * if not uninit if bh is uptodate,
@@ -1080,7 +1068,7 @@ static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
 	return 0;
 }
 
-static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
+static void mb_clear_bits(void *bm, int cur, int len)
 {
 	__u32 *addr;
@@ -1093,15 +1081,12 @@ static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
 			cur += 32;
 			continue;
 		}
-		if (lock)
-			mb_clear_bit_atomic(lock, cur, bm);
-		else
-			mb_clear_bit(cur, bm);
+		mb_clear_bit(cur, bm);
 		cur++;
 	}
 }
 
-static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
+static void mb_set_bits(void *bm, int cur, int len)
 {
 	__u32 *addr;
@@ -1114,9 +1099,6 @@ static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
 			cur += 32;
 			continue;
 		}
-		if (lock)
-			mb_set_bit_atomic(lock, cur, bm);
-		else
-			mb_set_bit(cur, bm);
+		mb_set_bit(cur, bm);
 		cur++;
 	}
@@ -1332,8 +1314,7 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
 		e4b->bd_info->bb_counters[ord]++;
 	}
 
-	mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
-			EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
+	mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
 	mb_check_buddy(e4b);
 
 	return ret;
@@ -2756,7 +2737,7 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 	return 0;
 }
 
-/* need to called with ext4 group lock (ext4_lock_group) */
+/* need to called with the ext4 group lock held */
 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
 {
 	struct ext4_prealloc_space *pa;
@@ -2993,14 +2974,17 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 		 * Fix the bitmap and repeat the block allocation
 		 * We leak some of the blocks here.
 		 */
-		mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
-				bitmap_bh->b_data, ac->ac_b_ex.fe_start,
-				ac->ac_b_ex.fe_len);
+		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
+		mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
+			    ac->ac_b_ex.fe_len);
+		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
 		if (!err)
 			err = -EAGAIN;
 		goto out_err;
 	}
+
+	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
 #ifdef AGGRESSIVE_CHECK
 	{
 		int i;
@@ -3010,9 +2994,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 		}
 	}
 #endif
-	spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
-	mb_set_bits(NULL, bitmap_bh->b_data,
-			ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
+	mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,ac->ac_b_ex.fe_len);
 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
 		ext4_free_blks_set(sb, gdp,
@@ -3022,7 +3004,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 		len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
 	ext4_free_blks_set(sb, gdp, len);
 	gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
-	spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
+	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
+
 	percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
 	/*
 	 * Now reduce the dirty block count also. Should not go negative
@@ -3455,7 +3438,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
  * the function goes through all block freed in the group
  * but not yet committed and marks them used in in-core bitmap.
  * buddy must be generated from this bitmap
- * Need to be called with ext4 group lock (ext4_lock_group)
+ * Need to be called with the ext4 group lock held
  */
 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
 						ext4_group_t group)
@@ -3469,9 +3452,7 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
 
 	while (n) {
 		entry = rb_entry(n, struct ext4_free_data, node);
-		mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
-				bitmap, entry->start_blk,
-				entry->count);
+		mb_set_bits(bitmap, entry->start_blk, entry->count);
 		n = rb_next(n);
 	}
 	return;
@@ -3480,7 +3461,7 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
 /*
  * the function goes through all preallocation in this group and marks them
  * used in in-core bitmap. buddy must be generated from this bitmap
- * Need to be called with ext4 group lock (ext4_lock_group)
+ * Need to be called with ext4 group lock held
  */
 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
 					ext4_group_t group)
@@ -3512,8 +3493,7 @@ static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
 		if (unlikely(len == 0))
 			continue;
 		BUG_ON(groupnr != group);
-		mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
-				bitmap, start, len);
+		mb_set_bits(bitmap, start, len);
 		preallocated += len;
 		count++;
 	}
@@ -4856,29 +4836,25 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
 		new_entry->group  = block_group;
 		new_entry->count = count;
 		new_entry->t_tid = handle->h_transaction->t_tid;
+
 		ext4_lock_group(sb, block_group);
-		mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
-				bit, count);
+		mb_clear_bits(bitmap_bh->b_data, bit, count);
 		ext4_mb_free_metadata(handle, &e4b, new_entry);
-		ext4_unlock_group(sb, block_group);
 	} else {
-		ext4_lock_group(sb, block_group);
 		/* need to update group_info->bb_free and bitmap
 		 * with group lock held. generate_buddy look at
 		 * them with group lock_held
 		 */
+		ext4_lock_group(sb, block_group);
 		mb_clear_bits(bitmap_bh->b_data, bit, count);
 		mb_free_blocks(inode, &e4b, bit, count);
 		ext4_mb_return_to_preallocation(inode, &e4b, block, count);
-		ext4_unlock_group(sb, block_group);
 	}
 
-	spin_lock(sb_bgl_lock(sbi, block_group));
 	ret = ext4_free_blks_count(sb, gdp) + count;
 	ext4_free_blks_set(sb, gdp, ret);
 	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
-	spin_unlock(sb_bgl_lock(sbi, block_group));
+	ext4_unlock_group(sb, block_group);
 	percpu_counter_add(&sbi->s_freeblocks_counter, count);
 
 	if (sbi->s_log_groups_per_flex) {
@@ -1784,18 +1784,18 @@ static int ext4_check_descriptors(struct super_block *sb)
 			       "(block %llu)!\n", i, inode_table);
 			return 0;
 		}
-		spin_lock(sb_bgl_lock(sbi, i));
+		ext4_lock_group(sb, i);
 		if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
 			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
 			       "Checksum for group %u failed (%u!=%u)\n",
 			       i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
 			       gdp)), le16_to_cpu(gdp->bg_checksum));
 			if (!(sb->s_flags & MS_RDONLY)) {
-				spin_unlock(sb_bgl_lock(sbi, i));
+				ext4_unlock_group(sb, i);
 				return 0;
 			}
 		}
-		spin_unlock(sb_bgl_lock(sbi, i));
+		ext4_unlock_group(sb, i);
 		if (!flexbg_flag)
 			first_block += EXT4_BLOCKS_PER_GROUP(sb);
 	}
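The call-site conversion itself is mechanical; a minimal before/after sketch of the pattern every hunk above applies, with desc and blk_free_count as in the ext4_add_groupblocks hunk:

	/* before: take the block-group spinlock by hand */
	spin_lock(sb_bgl_lock(sbi, block_group));
	ext4_free_blks_set(sb, desc, blk_free_count);
	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
	spin_unlock(sb_bgl_lock(sbi, block_group));

	/* after: the single consolidated group lock */
	ext4_lock_group(sb, block_group);
	ext4_free_blks_set(sb, desc, blk_free_count);
	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
	ext4_unlock_group(sb, block_group);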