diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 8a23483ca8d0c3668e171bbc53c7a629a2403876..3b64bb16c727784d0a5f6b6f7c12e0ac30aa4cd2 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -30,15 +30,15 @@
 void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
 		unsigned long *blockgrpp, ext4_grpblk_t *offsetp)
 {
-        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
 	ext4_grpblk_t offset;
 
-        blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
+	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
 	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
 	if (offsetp)
 		*offsetp = offset;
 	if (blockgrpp)
-	        *blockgrpp = blocknr;
+		*blockgrpp = blocknr;
 }
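/*
 * Illustrative sketch (not part of the patch): the group/offset split that
 * ext4_get_group_no_and_offset() performs above, redone in plain userspace C.
 * In the kernel, do_div() divides the 64-bit block number in place and hands
 * back the remainder; the constants below are assumptions for a filesystem
 * with 4KiB blocks, not values taken from the patch.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t blocknr = 123456789ULL;	/* absolute filesystem block */
	uint64_t first_data_block = 0;		/* s_first_data_block (0 for 4KiB blocks) */
	uint32_t blocks_per_group = 32768;	/* EXT4_BLOCKS_PER_GROUP for 4KiB blocks */

	blocknr -= first_data_block;
	uint32_t offset = (uint32_t)(blocknr % blocks_per_group);	/* do_div() remainder */
	uint64_t group = blocknr / blocks_per_group;			/* do_div() quotient */

	printf("block group %llu, offset %u\n",
	       (unsigned long long)group, offset);
	return 0;
}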
" next leaf starts at %d\n", - le32_to_cpu(border)); + le32_to_cpu(border)); } /* @@ -684,9 +684,9 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, while (path[depth].p_ext <= EXT_MAX_EXTENT(path[depth].p_hdr)) { ext_debug("move %d:%llu:%d in new leaf %llu\n", - le32_to_cpu(path[depth].p_ext->ee_block), - ext_pblock(path[depth].p_ext), - le16_to_cpu(path[depth].p_ext->ee_len), + le32_to_cpu(path[depth].p_ext->ee_block), + ext_pblock(path[depth].p_ext), + le16_to_cpu(path[depth].p_ext->ee_len), newblock); /*memmove(ex++, path[depth].p_ext++, sizeof(struct ext4_extent)); @@ -765,9 +765,9 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, EXT_LAST_INDEX(path[i].p_hdr)); while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) { ext_debug("%d: move %d:%d in new index %llu\n", i, - le32_to_cpu(path[i].p_idx->ei_block), - idx_pblock(path[i].p_idx), - newblock); + le32_to_cpu(path[i].p_idx->ei_block), + idx_pblock(path[i].p_idx), + newblock); /*memmove(++fidx, path[i].p_idx++, sizeof(struct ext4_extent_idx)); neh->eh_entries++; @@ -1127,6 +1127,55 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, return 0; } +/* + * check if a portion of the "newext" extent overlaps with an + * existing extent. + * + * If there is an overlap discovered, it updates the length of the newext + * such that there will be no overlap, and then returns 1. + * If there is no overlap found, it returns 0. + */ +unsigned int ext4_ext_check_overlap(struct inode *inode, + struct ext4_extent *newext, + struct ext4_ext_path *path) +{ + unsigned long b1, b2; + unsigned int depth, len1; + unsigned int ret = 0; + + b1 = le32_to_cpu(newext->ee_block); + len1 = le16_to_cpu(newext->ee_len); + depth = ext_depth(inode); + if (!path[depth].p_ext) + goto out; + b2 = le32_to_cpu(path[depth].p_ext->ee_block); + + /* + * get the next allocated block if the extent in the path + * is before the requested block(s) + */ + if (b2 < b1) { + b2 = ext4_ext_next_allocated_block(path); + if (b2 == EXT_MAX_BLOCK) + goto out; + } + + /* check for wrap through zero */ + if (b1 + len1 < b1) { + len1 = EXT_MAX_BLOCK - b1; + newext->ee_len = cpu_to_le16(len1); + ret = 1; + } + + /* check for overlap */ + if (b1 + len1 > b2) { + newext->ee_len = cpu_to_le16(b2 - b1); + ret = 1; + } +out: + return ret; +} + /* * ext4_ext_insert_extent: * tries to merge requsted extent into the existing extent or @@ -1212,12 +1261,12 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, if (!nearex) { /* there is no extent in this leaf, create first one */ ext_debug("first extent in the leaf: %d:%llu:%d\n", - le32_to_cpu(newext->ee_block), - ext_pblock(newext), - le16_to_cpu(newext->ee_len)); + le32_to_cpu(newext->ee_block), + ext_pblock(newext), + le16_to_cpu(newext->ee_len)); path[depth].p_ext = EXT_FIRST_EXTENT(eh); } else if (le32_to_cpu(newext->ee_block) - > le32_to_cpu(nearex->ee_block)) { + > le32_to_cpu(nearex->ee_block)) { /* BUG_ON(newext->ee_block == nearex->ee_block); */ if (nearex != EXT_LAST_EXTENT(eh)) { len = EXT_MAX_EXTENT(eh) - nearex; @@ -1225,9 +1274,9 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, len = len < 0 ? 
@@ -1212,12 +1261,12 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
 	if (!nearex) {
 		/* there is no extent in this leaf, create first one */
 		ext_debug("first extent in the leaf: %d:%llu:%d\n",
-                                le32_to_cpu(newext->ee_block),
-                                ext_pblock(newext),
-                                le16_to_cpu(newext->ee_len));
+				le32_to_cpu(newext->ee_block),
+				ext_pblock(newext),
+				le16_to_cpu(newext->ee_len));
 		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
 	} else if (le32_to_cpu(newext->ee_block)
-                   > le32_to_cpu(nearex->ee_block)) {
+		   > le32_to_cpu(nearex->ee_block)) {
 		/* BUG_ON(newext->ee_block == nearex->ee_block); */
 		if (nearex != EXT_LAST_EXTENT(eh)) {
 			len = EXT_MAX_EXTENT(eh) - nearex;
@@ -1225,9 +1274,9 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
 			len = len < 0 ? 0 : len;
 			ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
 					"move %d from 0x%p to 0x%p\n",
-                                        le32_to_cpu(newext->ee_block),
-                                        ext_pblock(newext),
-                                        le16_to_cpu(newext->ee_len),
+					le32_to_cpu(newext->ee_block),
+					ext_pblock(newext),
+					le16_to_cpu(newext->ee_len),
 					nearex, len, nearex + 1, nearex + 2);
 			memmove(nearex + 2, nearex + 1, len);
 		}
@@ -1358,9 +1407,9 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
 			cbex.ec_start = 0;
 			cbex.ec_type = EXT4_EXT_CACHE_GAP;
 		} else {
-                        cbex.ec_block = le32_to_cpu(ex->ee_block);
-                        cbex.ec_len = le16_to_cpu(ex->ee_len);
-                        cbex.ec_start = ext_pblock(ex);
+			cbex.ec_block = le32_to_cpu(ex->ee_block);
+			cbex.ec_len = le16_to_cpu(ex->ee_len);
+			cbex.ec_start = ext_pblock(ex);
 			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
 		}
 
@@ -1431,16 +1480,16 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
 		len = le32_to_cpu(ex->ee_block) - block;
 		ext_debug("cache gap(before): %lu [%lu:%lu]",
 				(unsigned long) block,
-                                (unsigned long) le32_to_cpu(ex->ee_block),
-                                (unsigned long) le16_to_cpu(ex->ee_len));
+				(unsigned long) le32_to_cpu(ex->ee_block),
+				(unsigned long) le16_to_cpu(ex->ee_len));
 	} else if (block >= le32_to_cpu(ex->ee_block)
-                            + le16_to_cpu(ex->ee_len)) {
-                lblock = le32_to_cpu(ex->ee_block)
-                         + le16_to_cpu(ex->ee_len);
+			    + le16_to_cpu(ex->ee_len)) {
+		lblock = le32_to_cpu(ex->ee_block)
+			 + le16_to_cpu(ex->ee_len);
 		len = ext4_ext_next_allocated_block(path);
 		ext_debug("cache gap(after): [%lu:%lu] %lu",
-                          (unsigned long) le32_to_cpu(ex->ee_block),
-                          (unsigned long) le16_to_cpu(ex->ee_len),
+			  (unsigned long) le32_to_cpu(ex->ee_block),
+			  (unsigned long) le16_to_cpu(ex->ee_len),
 			  (unsigned long) block);
 		BUG_ON(len == lblock);
 		len = len - lblock;
@@ -1468,9 +1517,9 @@ ext4_ext_in_cache(struct inode *inode, unsigned long block,
 	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
 			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
 	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
-                ex->ee_block = cpu_to_le32(cex->ec_block);
+		ex->ee_block = cpu_to_le32(cex->ec_block);
 		ext4_ext_store_pblock(ex, cex->ec_start);
-                ex->ee_len = cpu_to_le16(cex->ec_len);
+		ex->ee_len = cpu_to_le16(cex->ec_len);
 		ext_debug("%lu cached by %lu:%lu:%llu\n",
 				(unsigned long) block,
 				(unsigned long) cex->ec_block,
@@ -1956,9 +2005,9 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 			/* we should allocate requested block */
 		} else if (goal == EXT4_EXT_CACHE_EXTENT) {
 			/* block is already allocated */
-                        newblock = iblock
-                                   - le32_to_cpu(newex.ee_block)
-                                   + ext_pblock(&newex);
+			newblock = iblock
+				   - le32_to_cpu(newex.ee_block)
+				   + ext_pblock(&newex);
 			/* number of remaining blocks in the extent */
 			allocated = le16_to_cpu(newex.ee_len) -
 					(iblock - le32_to_cpu(newex.ee_block));
@@ -1987,7 +2036,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 
 	ex = path[depth].p_ext;
 	if (ex) {
-                unsigned long ee_block = le32_to_cpu(ex->ee_block);
+		unsigned long ee_block = le32_to_cpu(ex->ee_block);
 		ext4_fsblk_t ee_start = ext_pblock(ex);
 		unsigned short ee_len  = le16_to_cpu(ex->ee_len);
 
@@ -2000,7 +2049,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 		if (ee_len > EXT_MAX_LEN)
 			goto out2;
 		/* if found extent covers block, simply return it */
-                if (iblock >= ee_block && iblock < ee_block + ee_len) {
+		if (iblock >= ee_block && iblock < ee_block + ee_len) {
 			newblock = iblock - ee_block + ee_start;
 			/* number of remaining blocks in the extent */
 			allocated = ee_len - (iblock - ee_block);
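/*
 * Illustrative sketch (not part of the patch): the extent-hit arithmetic
 * used twice above in ext4_ext_get_blocks(). If an extent maps logical
 * blocks [ee_block, ee_block+ee_len) to physical blocks starting at
 * ee_start, a logical block inside it maps by a constant offset, and the
 * remaining run length falls out of the same subtraction. Values below
 * are made up for the demonstration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long ee_block = 1000;	/* first logical block of the extent */
	unsigned long ee_start = 8000;	/* first physical block of the extent */
	unsigned long ee_len   = 16;	/* extent length in blocks */
	unsigned long iblock   = 1005;	/* logical block being looked up */

	if (iblock >= ee_block && iblock < ee_block + ee_len) {
		unsigned long newblock  = iblock - ee_block + ee_start;
		unsigned long allocated = ee_len - (iblock - ee_block);
		printf("physical %lu, %lu contiguous blocks remain\n",
		       newblock, allocated);	/* 8005, 11 */
	}
	return 0;
}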
@@ -2031,7 +2080,15 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 
 	/* allocate new block */
 	goal = ext4_ext_find_goal(inode, path, iblock);
-	allocated = max_blocks;
+
+	/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
+	newex.ee_block = cpu_to_le32(iblock);
+	newex.ee_len = cpu_to_le16(max_blocks);
+	err = ext4_ext_check_overlap(inode, &newex, path);
+	if (err)
+		allocated = le16_to_cpu(newex.ee_len);
+	else
+		allocated = max_blocks;
 	newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
 	if (!newblock)
 		goto out2;
@@ -2039,12 +2096,15 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
 			goal, newblock, allocated);
 
 	/* try to insert new extent into found leaf and return */
-	newex.ee_block = cpu_to_le32(iblock);
 	ext4_ext_store_pblock(&newex, newblock);
 	newex.ee_len = cpu_to_le16(allocated);
 	err = ext4_ext_insert_extent(handle, inode, path, &newex);
-	if (err)
+	if (err) {
+		/* free data blocks we just allocated */
+		ext4_free_blocks(handle, inode, ext_pblock(&newex),
+				 le16_to_cpu(newex.ee_len));
 		goto out2;
+	}
 
 	if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
 		EXT4_I(inode)->i_disksize = inode->i_size;
@@ -2157,11 +2217,3 @@ int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
 
 	return needed;
 }
-
-EXPORT_SYMBOL(ext4_mark_inode_dirty);
-EXPORT_SYMBOL(ext4_ext_invalidate_cache);
-EXPORT_SYMBOL(ext4_ext_insert_extent);
-EXPORT_SYMBOL(ext4_ext_walk_space);
-EXPORT_SYMBOL(ext4_ext_find_goal);
-EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);
-
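/*
 * Illustrative sketch (not part of the patch): the cleanup ordering the
 * hunks above add to ext4_ext_get_blocks(). Once data blocks have been
 * allocated, a failure to record them in the extent tree must free them
 * again, or they would stay marked in-use with no owner. alloc_blocks(),
 * insert_extent() and free_blocks() are hypothetical stand-ins for the
 * kernel helpers, with failure hard-coded to show the error path.
 */
#include <stdio.h>

static long alloc_blocks(unsigned long goal, unsigned long *count)
{
	return 8000;			/* pretend the allocation succeeds */
}

static int insert_extent(unsigned long lblk, long pblk, unsigned long len)
{
	return -1;			/* pretend the tree insert fails */
}

static void free_blocks(long pblk, unsigned long count)
{
	printf("freeing %lu blocks at %ld\n", count, pblk);
}

int main(void)
{
	unsigned long allocated = 16;
	long newblock = alloc_blocks(1234, &allocated);

	if (insert_extent(1000, newblock, allocated) != 0) {
		/* insert failed: give the just-allocated blocks back */
		free_blocks(newblock, allocated);
		return 1;
	}
	return 0;
}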
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index b34182b6ee4d1ea9cba530ea5dac87a787a8b7c1..0bcf62a750ffe29f788b5eb9d1c9b209ec8d3f54 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -255,8 +255,8 @@ static int verify_chain(Indirect *from, Indirect *to)
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
- *      @boundary: set this non-zero if the referred-to block is likely to be
- *             followed (on disk) by an indirect block.
+ *	@boundary: set this non-zero if the referred-to block is likely to be
+ *	      followed (on disk) by an indirect block.
 *
 * To store the locations of file's data ext4 uses a data structure common
 * for UNIX filesystems - tree of pointers anchored in the inode, with
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 4ec57be5baf5df659a4f9c2d9330c8bfbe10c796..2811e5720ad019d18e9976ac8afc21e3c885768b 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -46,7 +46,7 @@
  */
 #define NAMEI_RA_CHUNKS  2
 #define NAMEI_RA_BLOCKS  4
-#define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
+#define NAMEI_RA_SIZE	     (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
 #define NAMEI_RA_INDEX(c,b)  (((c) * NAMEI_RA_BLOCKS) + (b))
 
 static struct buffer_head *ext4_append(handle_t *handle,
@@ -241,7 +241,7 @@ static inline unsigned dx_node_limit (struct inode *dir)
 static void dx_show_index (char * label, struct dx_entry *entries)
 {
 	int i, n = dx_get_count (entries);
-        printk("%s index ", label);
+	printk("%s index ", label);
 	for (i = 0; i < n; i++)
 	{
 		printk("%x->%u ", i? dx_get_hash(entries + i) : 0, dx_get_block(entries + i));
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index cb9afdd0e26e5f8f458329aaab3e04be6daf9a5c..175b68c609685fb06742caf6bed160ca7a98d68b 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1985,7 +1985,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
 
 	if (bd_claim(bdev, sb)) {
 		printk(KERN_ERR
-                        "EXT4: failed to claim external journal device.\n");
+			"EXT4: failed to claim external journal device.\n");
 		blkdev_put(bdev);
 		return NULL;
 	}
diff --git a/include/linux/ext4_fs.h b/include/linux/ext4_fs.h
index 54c576d414c32b83d993942188e66b4171962f85..de1f9f78625a3279bf5ddb580d57cc1003eef152 100644
--- a/include/linux/ext4_fs.h
+++ b/include/linux/ext4_fs.h
@@ -32,9 +32,9 @@
 /*
  * Define EXT4_RESERVATION to reserve data blocks for expanding files
  */
-#define EXT4_DEFAULT_RESERVE_BLOCKS     8
+#define EXT4_DEFAULT_RESERVE_BLOCKS	8
 /*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
-#define EXT4_MAX_RESERVE_BLOCKS         1027
+#define EXT4_MAX_RESERVE_BLOCKS		1027
 #define EXT4_RESERVE_WINDOW_NOT_ALLOCATED 0
 /*
  * Always enable hashed directories
@@ -204,12 +204,12 @@ struct ext4_group_desc
 
 /* Used to pass group descriptor data when online resize is done */
 struct ext4_new_group_input {
-        __u32 group;            /* Group number for this data */
-        __u64 block_bitmap;     /* Absolute block number of block bitmap */
-        __u64 inode_bitmap;     /* Absolute block number of inode bitmap */
-        __u64 inode_table;      /* Absolute block number of inode table start */
-        __u32 blocks_count;     /* Total number of blocks in this group */
-        __u16 reserved_blocks;  /* Number of reserved blocks in this group */
+	__u32 group;		/* Group number for this data */
+	__u64 block_bitmap;	/* Absolute block number of block bitmap */
+	__u64 inode_bitmap;	/* Absolute block number of inode bitmap */
+	__u64 inode_table;	/* Absolute block number of inode table start */
+	__u32 blocks_count;	/* Total number of blocks in this group */
+	__u16 reserved_blocks;	/* Number of reserved blocks in this group */
 	__u16 unused;
 };
@@ -310,7 +310,7 @@ struct ext4_inode {
 			__u8	l_i_frag;	/* Fragment number */
 			__u8	l_i_fsize;	/* Fragment size */
 			__le16	l_i_file_acl_high;
-                        __le16  l_i_uid_high;   /* these 2 fields */
+			__le16	l_i_uid_high;	/* these 2 fields */
 			__le16	l_i_gid_high;	/* were reserved2[0] */
 			__u32	l_i_reserved2;
 		} linux2;
@@ -513,7 +513,14 @@ struct ext4_super_block {
 /*150*/	__le32	s_blocks_count_hi;	/* Blocks count */
 	__le32	s_r_blocks_count_hi;	/* Reserved blocks count */
 	__le32	s_free_blocks_count_hi;	/* Free blocks count */
-	__u32	s_reserved[169];	/* Padding to the end of the block */
+	__u16	s_min_extra_isize;	/* All inodes have at least # bytes */
+	__u16	s_want_extra_isize;	/* New inodes should reserve # bytes */
+	__u32	s_flags;		/* Miscellaneous flags */
+	__u16	s_raid_stride;		/* RAID stride */
+	__u16	s_mmp_interval;		/* # seconds to wait in MMP checking */
+	__u64	s_mmp_block;		/* Block for multi-mount protection */
+	__u32	s_raid_stripe_width;	/* blocks on all data disks (N*stride)*/
+	__u32	s_reserved[163];	/* Padding to the end of the block */
 };
 
 #ifdef __KERNEL__
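/*
 * Illustrative sketch (not part of the patch): why s_reserved shrinks from
 * 169 to 163 words in the hunk above. The seven new fields occupy 24 bytes,
 * i.e. six 32-bit words, so the padding loses exactly six entries and the
 * on-disk superblock keeps its size. Userspace fixed-width types stand in
 * for the kernel's __u16/__u32/__u64.
 */
#include <stddef.h>
#include <stdint.h>
#include <assert.h>

int main(void)
{
	size_t new_fields = sizeof(uint16_t)	/* s_min_extra_isize */
			  + sizeof(uint16_t)	/* s_want_extra_isize */
			  + sizeof(uint32_t)	/* s_flags */
			  + sizeof(uint16_t)	/* s_raid_stride */
			  + sizeof(uint16_t)	/* s_mmp_interval */
			  + sizeof(uint64_t)	/* s_mmp_block */
			  + sizeof(uint32_t);	/* s_raid_stripe_width */

	assert(new_fields == 24);			/* 24 bytes of new fields... */
	assert(new_fields / sizeof(uint32_t) == 6);	/* ...is six 32-bit words */
	assert(169 - 6 == 163);				/* old padding minus new fields */
	return 0;
}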
@@ -780,9 +787,9 @@ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
 * Ok, these declarations are also in <linux/kernel.h> but none of the
 * ext4 source programs needs to include it so they are duplicated here.
 */
-# define NORET_TYPE     /**/
-# define ATTRIB_NORET   __attribute__((noreturn))
-# define NORET_AND      noreturn,
+# define NORET_TYPE	/**/
+# define ATTRIB_NORET	__attribute__((noreturn))
+# define NORET_AND	noreturn,
 
 /* balloc.c */
 extern unsigned int ext4_block_group(struct super_block *sb,
diff --git a/include/linux/ext4_fs_extents.h b/include/linux/ext4_fs_extents.h
index 7eb1d73fc5d1634ee535f777d7d5576b784e9180..acfe59740b032d1a5adcfebaa69ca491c048cafa 100644
--- a/include/linux/ext4_fs_extents.h
+++ b/include/linux/ext4_fs_extents.h
@@ -151,8 +151,8 @@ typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *,
 	((struct ext4_extent_idx *) (((char *) (__hdr__)) +	\
 				     sizeof(struct ext4_extent_header)))
 #define EXT_HAS_FREE_INDEX(__path__) \
-        (le16_to_cpu((__path__)->p_hdr->eh_entries) \
-                             < le16_to_cpu((__path__)->p_hdr->eh_max))
+	(le16_to_cpu((__path__)->p_hdr->eh_entries) \
+				< le16_to_cpu((__path__)->p_hdr->eh_max))
 #define EXT_LAST_EXTENT(__hdr__) \
 	(EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
 #define EXT_LAST_INDEX(__hdr__) \
@@ -190,6 +190,7 @@ ext4_ext_invalidate_cache(struct inode *inode)
 
 extern int ext4_extent_tree_init(handle_t *, struct inode *);
 extern int ext4_ext_calc_credits_for_insert(struct inode *, struct ext4_ext_path *);
+extern unsigned int ext4_ext_check_overlap(struct inode *, struct ext4_extent *, struct ext4_ext_path *);
 extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *);
 extern int ext4_ext_walk_space(struct inode *, unsigned long, unsigned long, ext_prepare_callback, void *);
 extern struct ext4_ext_path * ext4_ext_find_extent(struct inode *, int, struct ext4_ext_path *);
diff --git a/include/linux/ext4_fs_i.h b/include/linux/ext4_fs_i.h
index d5b177e5b3958338d13bfc46a9d8fcc1e338a9ee..9de4944069955a46e0183fbfe0a00adf04163270 100644
--- a/include/linux/ext4_fs_i.h
+++ b/include/linux/ext4_fs_i.h
@@ -41,14 +41,14 @@ struct ext4_reserve_window_node {
 
 struct ext4_block_alloc_info {
 	/* information about reservation window */
-        struct ext4_reserve_window_node rsv_window_node;
+	struct ext4_reserve_window_node	rsv_window_node;
 	/*
 	 * was i_next_alloc_block in ext4_inode_info
 	 * is the logical (file-relative) number of the
 	 * most-recently-allocated block in this file.
 	 * We use this for detecting linearly ascending allocation requests.
 	 */
-        __u32 last_alloc_logical_block;
+	__u32 last_alloc_logical_block;
 	/*
 	 * Was i_next_alloc_goal in ext4_inode_info
 	 * is the *physical* companion to i_next_alloc_block.
@@ -56,7 +56,7 @@ struct ext4_block_alloc_info {
 	 * allocated to this file. This gives us the goal (target) for the next
 	 * allocation when we detect linearly ascending requests.
 	 */
-        ext4_fsblk_t last_alloc_physical_block;
+	ext4_fsblk_t last_alloc_physical_block;
 };
 
 #define rsv_start rsv_window._rsv_start