Commit e8546d06 authored by Marcin Slusarz, committed by Theodore Ts'o

ext4: le*_add_cpu conversion

replace all:
little_endian_variable = cpu_to_leX(leX_to_cpu(little_endian_variable) +
					expression_in_cpu_byteorder);
with:
	leX_add_cpu(&little_endian_variable, expression_in_cpu_byteorder);
generated with semantic patch
Signed-off-by: Marcin Slusarz <marcin.slusarz@gmail.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Cc: linux-ext4@vger.kernel.org
Cc: sct@redhat.com
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: adilger@clusterfs.com
Cc: Mingming Cao <cmm@us.ibm.com>
Parent 9a0762c5
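The helpers this patch switches to are one-line wrappers around the same swap-add-swap sequence. A minimal sketch of the 16-bit variant, matching the definition in include/linux/byteorder/generic.h (the 32- and 64-bit variants are analogous):

	/* Add a CPU-byte-order value to a little-endian variable in place. */
	static inline void le16_add_cpu(__le16 *var, u16 val)
	{
		*var = cpu_to_le16(le16_to_cpu(*var) + val);
	}

A negative second argument subtracts, since the addition wraps modulo 2^16; this is why call sites below pass -1, -m, or -num. The "semantic patch" mentioned above is presumably a Coccinelle rule matching the cpu_to_leX(leX_to_cpu(x) + y) pattern; the conversion is purely mechanical and should not change the generated code, only remove byte-swap boilerplate from each call site.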
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -752,9 +752,7 @@ void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
 		jbd_unlock_bh_state(bitmap_bh);
 
 	spin_lock(sb_bgl_lock(sbi, block_group));
-	desc->bg_free_blocks_count =
-		cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
-			group_freed);
+	le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
 	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
 	spin_unlock(sb_bgl_lock(sbi, block_group));
 	percpu_counter_add(&sbi->s_freeblocks_counter, count);
@@ -1823,8 +1821,7 @@ ext4_fsblk_t ext4_new_blocks_old(handle_t *handle, struct inode *inode,
 	spin_lock(sb_bgl_lock(sbi, group_no));
 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
-	gdp->bg_free_blocks_count =
-		cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
+	le16_add_cpu(&gdp->bg_free_blocks_count, -num);
 	gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
 	spin_unlock(sb_bgl_lock(sbi, group_no));
 	percpu_counter_sub(&sbi->s_freeblocks_counter, num);
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -614,7 +614,7 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
 
 	ix->ei_block = cpu_to_le32(logical);
 	ext4_idx_store_pblock(ix, ptr);
-	curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);
+	le16_add_cpu(&curp->p_hdr->eh_entries, 1);
 
 	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
 	       > le16_to_cpu(curp->p_hdr->eh_max));
@@ -736,7 +736,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 	}
 	if (m) {
 		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
-		neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
+		le16_add_cpu(&neh->eh_entries, m);
 	}
 
 	set_buffer_uptodate(bh);
@@ -753,8 +753,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 	err = ext4_ext_get_access(handle, inode, path + depth);
 	if (err)
 		goto cleanup;
-	path[depth].p_hdr->eh_entries =
-		cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
+	le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
 	err = ext4_ext_dirty(handle, inode, path + depth);
 	if (err)
 		goto cleanup;
@@ -817,8 +816,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 		if (m) {
 			memmove(++fidx, path[i].p_idx - m,
 				sizeof(struct ext4_extent_idx) * m);
-			neh->eh_entries =
-				cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
+			le16_add_cpu(&neh->eh_entries, m);
 		}
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
@@ -834,7 +832,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 		err = ext4_ext_get_access(handle, inode, path + i);
 		if (err)
 			goto cleanup;
-		path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
+		le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
 		err = ext4_ext_dirty(handle, inode, path + i);
 		if (err)
 			goto cleanup;
@@ -1369,7 +1367,7 @@ int ext4_ext_try_to_merge(struct inode *inode,
 				* sizeof(struct ext4_extent);
 			memmove(ex + 1, ex + 2, len);
 		}
-		eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries) - 1);
+		le16_add_cpu(&eh->eh_entries, -1);
 		merge_done = 1;
 		WARN_ON(eh->eh_entries == 0);
 		if (!eh->eh_entries)
@@ -1560,7 +1558,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
 		path[depth].p_ext = nearex;
 	}
 
-	eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1);
+	le16_add_cpu(&eh->eh_entries, 1);
 	nearex = path[depth].p_ext;
 	nearex->ee_block = newext->ee_block;
 	ext4_ext_store_pblock(nearex, ext_pblock(newext));
@@ -1699,7 +1697,7 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
 	err = ext4_ext_get_access(handle, inode, path);
 	if (err)
 		return err;
-	path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
+	le16_add_cpu(&path->p_hdr->eh_entries, -1);
 	err = ext4_ext_dirty(handle, inode, path);
 	if (err)
 		return err;
@@ -1902,7 +1900,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
 		if (num == 0) {
 			/* this extent is removed; mark slot entirely unused */
 			ext4_ext_store_pblock(ex, 0);
-			eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
+			le16_add_cpu(&eh->eh_entries, -1);
 		}
 
 		ex->ee_block = cpu_to_le32(block);
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -223,11 +223,9 @@ void ext4_free_inode (handle_t *handle, struct inode * inode)
 
 		if (gdp) {
 			spin_lock(sb_bgl_lock(sbi, block_group));
-			gdp->bg_free_inodes_count = cpu_to_le16(
-				le16_to_cpu(gdp->bg_free_inodes_count) + 1);
+			le16_add_cpu(&gdp->bg_free_inodes_count, 1);
 			if (is_directory)
-				gdp->bg_used_dirs_count = cpu_to_le16(
-					le16_to_cpu(gdp->bg_used_dirs_count) - 1);
+				le16_add_cpu(&gdp->bg_used_dirs_count, -1);
 			gdp->bg_checksum = ext4_group_desc_csum(sbi,
 							block_group, gdp);
 			spin_unlock(sb_bgl_lock(sbi, block_group));
@@ -664,11 +662,9 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
 			cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
 	}
 
-	gdp->bg_free_inodes_count =
-		cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
+	le16_add_cpu(&gdp->bg_free_inodes_count, -1);
 	if (S_ISDIR(mode)) {
-		gdp->bg_used_dirs_count =
-			cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
+		le16_add_cpu(&gdp->bg_used_dirs_count, 1);
 	}
 	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
 	spin_unlock(sb_bgl_lock(sbi, group));
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3099,9 +3099,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 						ac->ac_b_ex.fe_group,
 						gdp));
 	}
-	gdp->bg_free_blocks_count =
-		cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
-				- ac->ac_b_ex.fe_len);
+	le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len);
 	gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
 	spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
 	percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
@@ -4593,8 +4591,7 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
 	}
 
 	spin_lock(sb_bgl_lock(sbi, block_group));
-	gdp->bg_free_blocks_count =
-		cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
+	le16_add_cpu(&gdp->bg_free_blocks_count, count);
 	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
 	spin_unlock(sb_bgl_lock(sbi, block_group));
 	percpu_counter_add(&sbi->s_freeblocks_counter, count);
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -502,8 +502,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 	EXT4_SB(sb)->s_gdb_count++;
 	kfree(o_group_desc);
 
-	es->s_reserved_gdt_blocks =
-		cpu_to_le16(le16_to_cpu(es->s_reserved_gdt_blocks) - 1);
+	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
 	ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
 
 	return 0;
@@ -877,8 +876,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
 	 */
 	ext4_blocks_count_set(es, ext4_blocks_count(es) +
 		input->blocks_count);
-	es->s_inodes_count = cpu_to_le32(le32_to_cpu(es->s_inodes_count) +
-		EXT4_INODES_PER_GROUP(sb));
+	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb));
 
 	/*
 	 * We need to protect s_groups_count against other CPUs seeing
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1392,7 +1392,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
 #endif
 	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
 		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
-	es->s_mnt_count=cpu_to_le16(le16_to_cpu(es->s_mnt_count) + 1);
+	le16_add_cpu(&es->s_mnt_count, 1);
 	es->s_mtime = cpu_to_le32(get_seconds());
 	ext4_update_dynamic_rev(sb);
 	EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -484,8 +484,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 		get_bh(bh);
 		ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
 	} else {
-		BHDR(bh)->h_refcount = cpu_to_le32(
-			le32_to_cpu(BHDR(bh)->h_refcount) - 1);
+		le32_add_cpu(&BHDR(bh)->h_refcount, -1);
 		error = ext4_journal_dirty_metadata(handle, bh);
 		if (IS_SYNC(inode))
 			handle->h_sync = 1;
@@ -789,8 +788,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			if (error)
 				goto cleanup_dquot;
 			lock_buffer(new_bh);
-			BHDR(new_bh)->h_refcount = cpu_to_le32(1 +
-				le32_to_cpu(BHDR(new_bh)->h_refcount));
+			le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
 			ea_bdebug(new_bh, "reusing; refcount now=%d",
 				  le32_to_cpu(BHDR(new_bh)->h_refcount));
 			unlock_buffer(new_bh);