Commit feb0ab32 authored by Darrick J. Wong, committed by Theodore Ts'o

ext4: make block group checksums use metadata_csum algorithm

metadata_csum supersedes uninit_bg.  Convert the ROCOMPAT uninit_bg
flag check to a helper function that covers both, and make the
checksum calculation algorithm use either crc16 or the metadata_csum
chosen algorithm depending on which flag is set.  Print a warning if
we try to mount a filesystem with both feature flags set.
Signed-off-by: Darrick J. Wong <djwong@us.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Parent cc8e94fd
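For illustration only, here is a minimal userspace sketch of the flag-based dispatch this patch introduces. Everything in it is a stand-in written for this note: toy_csum16()/toy_csum32() are not the kernel's crc16()/ext4_chksum() (crc32c seeded with sbi->s_csum_seed), the local RO_COMPAT_* macros merely mirror the on-disk ro_compat bits, and endianness, the UUID, and the checksum-field masking are omitted. It only shows the shape of the selection: metadata_csum wins when set, otherwise the old crc16-style path is used.

/*
 * Userspace sketch of the checksum selection described above.
 * The checksum routines are stand-ins, NOT the kernel's crc16()/crc32c.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define RO_COMPAT_GDT_CSUM      0x0010  /* uninit_bg */
#define RO_COMPAT_METADATA_CSUM 0x0400  /* metadata_csum */

/* Stand-in 16-bit checksum (the kernel uses crc16 over UUID+group+desc). */
static uint16_t toy_csum16(uint16_t crc, const uint8_t *p, size_t len)
{
        while (len--)
                crc = (uint16_t)((crc << 5) + crc) ^ *p++;
        return crc;
}

/* Stand-in 32-bit checksum (the kernel uses crc32c via ext4_chksum()). */
static uint32_t toy_csum32(uint32_t crc, const uint8_t *p, size_t len)
{
        while (len--)
                crc = (crc << 7) ^ (crc >> 25) ^ *p++;
        return crc;
}

/*
 * Mirror of the dispatch: if metadata_csum is set, use the 32-bit algorithm
 * and keep the low 16 bits (bg_checksum is a __le16); otherwise fall back
 * to the old uninit_bg/crc16-style path.
 */
static uint16_t group_desc_csum(uint32_t ro_compat, uint32_t group,
                                const uint8_t *desc, size_t desc_size)
{
        if (ro_compat & RO_COMPAT_METADATA_CSUM) {
                uint32_t c = toy_csum32(~0U, (const uint8_t *)&group,
                                        sizeof(group));

                c = toy_csum32(c, desc, desc_size);
                return (uint16_t)(c & 0xFFFF);
        }
        return toy_csum16((uint16_t)~0U, desc, desc_size);
}

int main(void)
{
        uint8_t desc[64] = { 1, 2, 3 };

        printf("uninit_bg:     %#06x\n",
               group_desc_csum(RO_COMPAT_GDT_CSUM, 0, desc, sizeof(desc)));
        printf("metadata_csum: %#06x\n",
               group_desc_csum(RO_COMPAT_METADATA_CSUM, 0, desc, sizeof(desc)));
        return 0;
}

Callers never pick an algorithm themselves; with this patch they go through ext4_group_desc_csum_set() and ext4_group_desc_csum_verify(), which hide the choice behind ext4_has_group_desc_csum().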
@@ -168,7 +168,7 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
 
 	/* If checksum is bad mark all blocks used to prevent allocation
	 * essentially implementing a per-group read-only flag. */
-	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
+	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
 		ext4_error(sb, "Checksum bad for group %u", block_group);
 		ext4_free_group_clusters_set(sb, gdp, 0);
 		ext4_free_inodes_set(sb, gdp, 0);
@@ -214,7 +214,7 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
 			     sb->s_blocksize * 8, bh->b_data);
 	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
 				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
-	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
+	ext4_group_desc_csum_set(sb, block_group, gdp);
 }
 
 /* Return the number of free blocks in a block group. It is used when
......
@@ -2114,10 +2114,10 @@ extern void ext4_used_dirs_set(struct super_block *sb,
 				struct ext4_group_desc *bg, __u32 count);
 extern void ext4_itable_unused_set(struct super_block *sb,
 				   struct ext4_group_desc *bg, __u32 count);
-extern __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 group,
-				   struct ext4_group_desc *gdp);
-extern int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 group,
+extern int ext4_group_desc_csum_verify(struct super_block *sb, __u32 group,
 				       struct ext4_group_desc *gdp);
+extern void ext4_group_desc_csum_set(struct super_block *sb, __u32 group,
+				     struct ext4_group_desc *gdp);
 
 static inline int ext4_has_group_desc_csum(struct super_block *sb)
 {
......
@@ -70,13 +70,11 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb,
 				       ext4_group_t block_group,
 				       struct ext4_group_desc *gdp)
 {
-	struct ext4_sb_info *sbi = EXT4_SB(sb);
-
 	J_ASSERT_BH(bh, buffer_locked(bh));
 
 	/* If checksum is bad mark all blocks and inodes use to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
-	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
+	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
 		ext4_error(sb, "Checksum bad for group %u", block_group);
 		ext4_free_group_clusters_set(sb, gdp, 0);
 		ext4_free_inodes_set(sb, gdp, 0);
@@ -92,7 +90,7 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb,
 			bh->b_data);
 	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
 				   EXT4_INODES_PER_GROUP(sb) / 8);
-	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
+	ext4_group_desc_csum_set(sb, block_group, gdp);
 
 	return EXT4_INODES_PER_GROUP(sb);
 }
@@ -298,7 +296,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 		}
 		ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
 					   EXT4_INODES_PER_GROUP(sb) / 8);
-		gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
+		ext4_group_desc_csum_set(sb, block_group, gdp);
 		ext4_unlock_group(sb, block_group);
 		percpu_counter_inc(&sbi->s_freeinodes_counter);
@@ -731,7 +729,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode,
 got:
 	/* We may have to initialize the block bitmap if it isn't already */
-	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
+	if (ext4_has_group_desc_csum(sb) &&
 	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 		struct buffer_head *block_bitmap_bh;
 
@@ -757,8 +755,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode,
 						   block_bitmap_bh,
 						   EXT4_BLOCKS_PER_GROUP(sb) /
 						   8);
-			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
-								gdp);
+			ext4_group_desc_csum_set(sb, group, gdp);
 		}
 		ext4_unlock_group(sb, group);
@@ -811,7 +808,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode,
 	if (ext4_has_group_desc_csum(sb)) {
 		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
 					   EXT4_INODES_PER_GROUP(sb) / 8);
-		gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+		ext4_group_desc_csum_set(sb, group, gdp);
 		ext4_unlock_group(sb, group);
 	}
@@ -1181,7 +1178,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
 skip_zeroout:
 	ext4_lock_group(sb, group);
 	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
-	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+	ext4_group_desc_csum_set(sb, group, gdp);
 	ext4_unlock_group(sb, group);
 
 	BUFFER_TRACE(group_desc_bh,
......
@@ -3584,8 +3584,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
 		b = table;
 		end = b + EXT4_SB(sb)->s_inode_readahead_blks;
 		num = EXT4_INODES_PER_GROUP(sb);
-		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
-				       EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+		if (ext4_has_group_desc_csum(sb))
 			num -= ext4_itable_unused_count(sb, gdp);
 		table += num / inodes_per_block;
 		if (end > table)
......
@@ -2799,7 +2799,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 	ext4_free_group_clusters_set(sb, gdp, len);
 	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh,
 				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
-	gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
+	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
 	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
@@ -4663,7 +4663,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 	ext4_free_group_clusters_set(sb, gdp, ret);
 	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
 				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
-	gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
+	ext4_group_desc_csum_set(sb, block_group, gdp);
 	ext4_unlock_group(sb, block_group);
 	percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
@@ -4809,7 +4809,7 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
 	ext4_free_group_clusters_set(sb, desc, blk_free_count);
 	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh,
 				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
-	desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
+	ext4_group_desc_csum_set(sb, block_group, desc);
 	ext4_unlock_group(sb, block_group);
 	percpu_counter_add(&sbi->s_freeclusters_counter,
 			   EXT4_B2C(sbi, blocks_freed));
......
@@ -1160,7 +1160,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
 					     EXT4_B2C(sbi, group_data->free_blocks_count));
 		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
 		gdp->bg_flags = cpu_to_le16(*bg_flags);
-		gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+		ext4_group_desc_csum_set(sb, group, gdp);
 
 		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
 		if (unlikely(err)) {
@@ -1399,17 +1399,14 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
 				   (1 + ext4_bg_num_gdb(sb, group + i) +
 				    le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
 		group_data[i].free_blocks_count = blocks_per_group - overhead;
-		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
-					       EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+		if (ext4_has_group_desc_csum(sb))
 			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
 					       EXT4_BG_INODE_UNINIT;
 		else
 			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
 	}
 
-	if (last_group == n_group &&
-	    EXT4_HAS_RO_COMPAT_FEATURE(sb,
-				       EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+	if (last_group == n_group && ext4_has_group_desc_csum(sb))
 		/* We need to initialize block bitmap of last group. */
 		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
......
@@ -1952,43 +1952,69 @@ static int ext4_fill_flex_info(struct super_block *sb)
 	return 0;
 }
 
-__le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
-			    struct ext4_group_desc *gdp)
+static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
+				   struct ext4_group_desc *gdp)
 {
+	int offset;
 	__u16 crc = 0;
+	__le32 le_group = cpu_to_le32(block_group);
 
-	if (sbi->s_es->s_feature_ro_compat &
-	    cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
-		int offset = offsetof(struct ext4_group_desc, bg_checksum);
-		__le32 le_group = cpu_to_le32(block_group);
-
-		crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
-		crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
-		crc = crc16(crc, (__u8 *)gdp, offset);
-		offset += sizeof(gdp->bg_checksum); /* skip checksum */
-		/* for checksum of struct ext4_group_desc do the rest...*/
-		if ((sbi->s_es->s_feature_incompat &
-		     cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) &&
-		    offset < le16_to_cpu(sbi->s_es->s_desc_size))
-			crc = crc16(crc, (__u8 *)gdp + offset,
-				    le16_to_cpu(sbi->s_es->s_desc_size) -
-					offset);
+	if ((sbi->s_es->s_feature_ro_compat &
+	     cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) {
+		/* Use new metadata_csum algorithm */
+		__u16 old_csum;
+		__u32 csum32;
+
+		old_csum = gdp->bg_checksum;
+		gdp->bg_checksum = 0;
+		csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
+				     sizeof(le_group));
+		csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp,
+				     sbi->s_desc_size);
+		gdp->bg_checksum = old_csum;
+
+		crc = csum32 & 0xFFFF;
+		goto out;
 	}
 
+	/* old crc16 code */
+	offset = offsetof(struct ext4_group_desc, bg_checksum);
+	crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
+	crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
+	crc = crc16(crc, (__u8 *)gdp, offset);
+	offset += sizeof(gdp->bg_checksum); /* skip checksum */
+	/* for checksum of struct ext4_group_desc do the rest...*/
+	if ((sbi->s_es->s_feature_incompat &
+	     cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) &&
+	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
+		crc = crc16(crc, (__u8 *)gdp + offset,
+			    le16_to_cpu(sbi->s_es->s_desc_size) -
				offset);
+
+out:
 	return cpu_to_le16(crc);
 }
 
-int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group,
+int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
 				struct ext4_group_desc *gdp)
 {
-	if ((sbi->s_es->s_feature_ro_compat &
-	     cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) &&
-	    (gdp->bg_checksum != ext4_group_desc_csum(sbi, block_group, gdp)))
+	if (ext4_has_group_desc_csum(sb) &&
+	    (gdp->bg_checksum != ext4_group_desc_csum(EXT4_SB(sb),
						      block_group, gdp)))
 		return 0;
 
 	return 1;
 }
 
+void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
+			      struct ext4_group_desc *gdp)
+{
+	if (!ext4_has_group_desc_csum(sb))
+		return;
+
+	gdp->bg_checksum = ext4_group_desc_csum(EXT4_SB(sb), block_group, gdp);
+}
+
 /* Called at mount-time, super-block is locked */
 static int ext4_check_descriptors(struct super_block *sb,
 				  ext4_group_t *first_not_zeroed)
@@ -2043,7 +2069,7 @@ static int ext4_check_descriptors(struct super_block *sb,
 			return 0;
 		}
 		ext4_lock_group(sb, i);
-		if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
+		if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
 				 "Checksum for group %u failed (%u!=%u)",
 				 i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
@@ -3069,6 +3095,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		goto cantfind_ext4;
 	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
 
+	/* Warn if metadata_csum and gdt_csum are both set. */
+	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
+	    EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
+		ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are "
+			     "redundant flags; please run fsck.");
+
 	/* Check for a known checksum algorithm */
 	if (!ext4_verify_csum_type(sb, es)) {
 		ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
@@ -4400,7 +4433,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 			struct ext4_group_desc *gdp =
 					ext4_get_group_desc(sb, g, NULL);
 
-			if (!ext4_group_desc_csum_verify(sbi, g, gdp)) {
+			if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
 				ext4_msg(sb, KERN_ERR,
 	   "ext4_remount: Checksum for group %u failed (%u!=%u)",
 		g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)),
......