Commit bbbd27e6 authored by Linus Torvalds

Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull ext2, ext3, udf updates from Jan Kara:
 "Several UDF fixes, a support for UDF extent cache, and couple of ext2
  and ext3 cleanups and minor fixes"

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  Ext2: remove the static function release_blocks to optimize the kernel
  Ext2: mark inode dirty after the function dquot_free_block_nodirty is called
  Ext2: remove the overhead check about sb in the function ext2_new_blocks
  udf: Remove unused s_extLength from udf_bitmap
  udf: Make s_block_bitmap standard array
  udf: Fix bitmap overflow on large filesystems with small block size
  udf: add extent cache support in case of file reading
  udf: Write LVID to disk after opening / closing
  Ext3: return ENOMEM rather than EIO if sb_getblk fails
  Ext2: return ENOMEM rather than EIO if sb_getblk fails
  Ext3: use unlikely to improve the efficiency of the kernel
  Ext2: use unlikely to improve the efficiency of the kernel
  Ext3: add necessary check in case IO error happens
  Ext2: free memory allocated and forget buffer head when io error happens
  ext3: Fix memory leak when quota options are specified multiple times
  ext3, ext4, ocfs2: remove unused macro NAMEI_RA_INDEX
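
Most of the ext2/ext3 patches in this series apply one pattern: sb_getblk() fails only when a buffer head cannot be allocated, so a NULL return is reported as -ENOMEM rather than -EIO, and the check is wrapped in unlikely(). Below is a minimal, illustrative sketch of that pattern; it is not a hunk from the series, and example_read_metadata() is a hypothetical caller used purely for illustration. The diff that follows applies the same conversion at each real sb_getblk() call site.

/*
 * Illustrative sketch only, not part of this series: report a failed
 * sb_getblk() as -ENOMEM, since no I/O has been attempted yet.
 */
#include <linux/buffer_head.h>
#include <linux/fs.h>

static int example_read_metadata(struct super_block *sb, sector_t blk,
                                 struct buffer_head **out)
{
        struct buffer_head *bh;

        bh = sb_getblk(sb, blk);        /* NULL only on allocation failure */
        if (unlikely(!bh))
                return -ENOMEM;         /* not -EIO: nothing was read yet */

        *out = bh;                      /* caller must brelse(*out) when done */
        return 0;
}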
@@ -159,15 +159,6 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
 	return bh;
 }
 
-static void release_blocks(struct super_block *sb, int count)
-{
-	if (count) {
-		struct ext2_sb_info *sbi = EXT2_SB(sb);
-
-		percpu_counter_add(&sbi->s_freeblocks_counter, count);
-	}
-}
-
 static void group_adjust_blocks(struct super_block *sb, int group_no,
 	struct ext2_group_desc *desc, struct buffer_head *bh, int count)
 {
@@ -568,8 +559,11 @@ void ext2_free_blocks (struct inode * inode, unsigned long block,
 	}
 error_return:
 	brelse(bitmap_bh);
-	release_blocks(sb, freed);
-	dquot_free_block_nodirty(inode, freed);
+	if (freed) {
+		percpu_counter_add(&sbi->s_freeblocks_counter, freed);
+		dquot_free_block_nodirty(inode, freed);
+		mark_inode_dirty(inode);
+	}
 }
 
 /**
@@ -1239,10 +1233,6 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
 	*errp = -ENOSPC;
 
 	sb = inode->i_sb;
-	if (!sb) {
-		printk("ext2_new_blocks: nonexistent device");
-		return 0;
-	}
 
 	/*
 	 * Check quota for allocation of this block.
@@ -1416,9 +1406,11 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
 	*errp = 0;
 	brelse(bitmap_bh);
-	dquot_free_block_nodirty(inode, *count-num);
-	mark_inode_dirty(inode);
-	*count = num;
+	if (num < *count) {
+		dquot_free_block_nodirty(inode, *count-num);
+		mark_inode_dirty(inode);
+		*count = num;
+	}
 	return ret_block;
 
 io_error:
...
@@ -495,6 +495,10 @@ static int ext2_alloc_branch(struct inode *inode,
 		 * parent to disk.
 		 */
 		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+		if (unlikely(!bh)) {
+			err = -ENOMEM;
+			goto failed;
+		}
 		branch[n].bh = bh;
 		lock_buffer(bh);
 		memset(bh->b_data, 0, blocksize);
@@ -523,6 +527,14 @@ static int ext2_alloc_branch(struct inode *inode,
 	}
 	*blks = num;
 	return err;
+
+failed:
+	for (i = 1; i < n; i++)
+		bforget(branch[i].bh);
+	for (i = 0; i < indirect_blks; i++)
+		ext2_free_blocks(inode, new_blocks[i], 1);
+	ext2_free_blocks(inode, new_blocks[i], num);
+	return err;
 }
 
 /**
...
@@ -1500,7 +1500,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
 			bh = sb_bread(sb, tmp_bh.b_blocknr);
 		else
 			bh = sb_getblk(sb, tmp_bh.b_blocknr);
-		if (!bh) {
+		if (unlikely(!bh)) {
 			err = -EIO;
 			goto out;
 		}
...
@@ -662,10 +662,10 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
 			ea_idebug(inode, "creating block %d", block);
 
 			new_bh = sb_getblk(sb, block);
-			if (!new_bh) {
+			if (unlikely(!new_bh)) {
 				ext2_free_blocks(inode, block, 1);
 				mark_inode_dirty(inode);
-				error = -EIO;
+				error = -ENOMEM;
 				goto cleanup;
 			}
 			lock_buffer(new_bh);
...
@@ -676,6 +676,10 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
 		 * parent to disk.
 		 */
 		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+		if (unlikely(!bh)) {
+			err = -ENOMEM;
+			goto failed;
+		}
 		branch[n].bh = bh;
 		lock_buffer(bh);
 		BUFFER_TRACE(bh, "call get_create_access");
@@ -717,7 +721,7 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
 		BUFFER_TRACE(branch[i].bh, "call journal_forget");
 		ext3_journal_forget(handle, branch[i].bh);
 	}
-	for (i = 0; i <indirect_blks; i++)
+	for (i = 0; i < indirect_blks; i++)
 		ext3_free_blocks(handle, inode, new_blocks[i], 1);
 
 	ext3_free_blocks(handle, inode, new_blocks[i], num);
@@ -1078,8 +1082,8 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
 	if (!err && buffer_mapped(&dummy)) {
 		struct buffer_head *bh;
 		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
-		if (!bh) {
-			*errp = -EIO;
+		if (unlikely(!bh)) {
+			*errp = -ENOMEM;
 			goto err;
 		}
 		if (buffer_new(&dummy)) {
@@ -2729,12 +2733,12 @@ static int __ext3_get_inode_loc(struct inode *inode,
 		return -EIO;
 
 	bh = sb_getblk(inode->i_sb, block);
-	if (!bh) {
+	if (unlikely(!bh)) {
 		ext3_error (inode->i_sb, "ext3_get_inode_loc",
 				"unable to read inode block - "
 				"inode=%lu, block="E3FSBLK,
 				inode->i_ino, block);
-		return -EIO;
+		return -ENOMEM;
 	}
 	if (!buffer_uptodate(bh)) {
 		lock_buffer(bh);
@@ -2783,7 +2787,7 @@ static int __ext3_get_inode_loc(struct inode *inode,
 			bitmap_bh = sb_getblk(inode->i_sb,
 					le32_to_cpu(desc->bg_inode_bitmap));
-			if (!bitmap_bh)
+			if (unlikely(!bitmap_bh))
 				goto make_io;
 
 			/*
...
...@@ -36,7 +36,6 @@ ...@@ -36,7 +36,6 @@
#define NAMEI_RA_CHUNKS 2 #define NAMEI_RA_CHUNKS 2
#define NAMEI_RA_BLOCKS 4 #define NAMEI_RA_BLOCKS 4
#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS) #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
static struct buffer_head *ext3_append(handle_t *handle, static struct buffer_head *ext3_append(handle_t *handle,
struct inode *inode, struct inode *inode,
......
@@ -116,8 +116,8 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
 	int err;
 
 	bh = sb_getblk(sb, blk);
-	if (!bh)
-		return ERR_PTR(-EIO);
+	if (unlikely(!bh))
+		return ERR_PTR(-ENOMEM);
 	if ((err = ext3_journal_get_write_access(handle, bh))) {
 		brelse(bh);
 		bh = ERR_PTR(err);
@@ -234,8 +234,8 @@ static int setup_new_group_blocks(struct super_block *sb,
 			goto exit_bh;
 
 		gdb = sb_getblk(sb, block);
-		if (!gdb) {
-			err = -EIO;
+		if (unlikely(!gdb)) {
+			err = -ENOMEM;
 			goto exit_bh;
 		}
 		if ((err = ext3_journal_get_write_access(handle, gdb))) {
@@ -722,8 +722,8 @@ static void update_backups(struct super_block *sb,
 			break;
 
 		bh = sb_getblk(sb, group * bpg + blk_off);
-		if (!bh) {
-			err = -EIO;
+		if (unlikely(!bh)) {
+			err = -ENOMEM;
 			break;
 		}
 		ext3_debug("update metadata backup %#04lx\n",
...
@@ -916,21 +916,24 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
 			"Not enough memory for storing quotafile name");
 		return 0;
 	}
-	if (sbi->s_qf_names[qtype] &&
-		strcmp(sbi->s_qf_names[qtype], qname)) {
-		ext3_msg(sb, KERN_ERR,
-			"%s quota file already specified", QTYPE2NAME(qtype));
+	if (sbi->s_qf_names[qtype]) {
+		int same = !strcmp(sbi->s_qf_names[qtype], qname);
+
 		kfree(qname);
-		return 0;
+		if (!same) {
+			ext3_msg(sb, KERN_ERR,
+				 "%s quota file already specified",
+				 QTYPE2NAME(qtype));
+		}
+		return same;
 	}
-	sbi->s_qf_names[qtype] = qname;
-	if (strchr(sbi->s_qf_names[qtype], '/')) {
+	if (strchr(qname, '/')) {
 		ext3_msg(sb, KERN_ERR,
 			"quotafile must be on filesystem root");
-		kfree(sbi->s_qf_names[qtype]);
-		sbi->s_qf_names[qtype] = NULL;
+		kfree(qname);
 		return 0;
 	}
+	sbi->s_qf_names[qtype] = qname;
 	set_opt(sbi->s_mount_opt, QUOTA);
 	return 1;
 }
@@ -945,11 +948,10 @@ static int clear_qf_name(struct super_block *sb, int qtype) {
 			" when quota turned on");
 		return 0;
 	}
-	/*
-	 * The space will be released later when all options are confirmed
-	 * to be correct
-	 */
-	sbi->s_qf_names[qtype] = NULL;
+	if (sbi->s_qf_names[qtype]) {
+		kfree(sbi->s_qf_names[qtype]);
+		sbi->s_qf_names[qtype] = NULL;
+	}
 	return 1;
 }
 #endif
@@ -2606,7 +2608,18 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
 #ifdef CONFIG_QUOTA
 	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
 	for (i = 0; i < MAXQUOTAS; i++)
-		old_opts.s_qf_names[i] = sbi->s_qf_names[i];
+		if (sbi->s_qf_names[i]) {
+			old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
+							 GFP_KERNEL);
+			if (!old_opts.s_qf_names[i]) {
+				int j;
+
+				for (j = 0; j < i; j++)
+					kfree(old_opts.s_qf_names[j]);
+				return -ENOMEM;
+			}
+		} else
+			old_opts.s_qf_names[i] = NULL;
 #endif
 
 	/*
@@ -2699,9 +2712,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
 #ifdef CONFIG_QUOTA
 	/* Release old quota file names */
 	for (i = 0; i < MAXQUOTAS; i++)
-		if (old_opts.s_qf_names[i] &&
-		    old_opts.s_qf_names[i] != sbi->s_qf_names[i])
-			kfree(old_opts.s_qf_names[i]);
+		kfree(old_opts.s_qf_names[i]);
 #endif
 	if (enable_quota)
 		dquot_resume(sb, -1);
@@ -2715,9 +2726,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
 #ifdef CONFIG_QUOTA
 	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
 	for (i = 0; i < MAXQUOTAS; i++) {
-		if (sbi->s_qf_names[i] &&
-		    old_opts.s_qf_names[i] != sbi->s_qf_names[i])
-			kfree(sbi->s_qf_names[i]);
+		kfree(sbi->s_qf_names[i]);
 		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
 	}
 #endif
...
@@ -813,10 +813,10 @@ ext3_xattr_block_set(handle_t *handle, struct inode *inode,
 			ea_idebug(inode, "creating block %d", block);
 
 			new_bh = sb_getblk(sb, block);
-			if (!new_bh) {
+			if (unlikely(!new_bh)) {
 getblk_failed:
 				ext3_free_blocks(handle, inode, block, 1);
-				error = -EIO;
+				error = -ENOMEM;
 				goto cleanup;
 			}
 			lock_buffer(new_bh);
...
...@@ -47,7 +47,6 @@ ...@@ -47,7 +47,6 @@
#define NAMEI_RA_CHUNKS 2 #define NAMEI_RA_CHUNKS 2
#define NAMEI_RA_BLOCKS 4 #define NAMEI_RA_BLOCKS 4
#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS) #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
static struct buffer_head *ext4_append(handle_t *handle, static struct buffer_head *ext4_append(handle_t *handle,
struct inode *inode, struct inode *inode,
......
...@@ -67,7 +67,6 @@ ...@@ -67,7 +67,6 @@
#define NAMEI_RA_CHUNKS 2 #define NAMEI_RA_CHUNKS 2
#define NAMEI_RA_BLOCKS 4 #define NAMEI_RA_BLOCKS 4
#define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS) #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
#define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
static unsigned char ocfs2_filetype_table[] = { static unsigned char ocfs2_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
......
@@ -67,6 +67,74 @@ static void udf_update_extents(struct inode *,
 			       struct extent_position *);
 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
 
+static void __udf_clear_extent_cache(struct inode *inode)
+{
+	struct udf_inode_info *iinfo = UDF_I(inode);
+
+	if (iinfo->cached_extent.lstart != -1) {
+		brelse(iinfo->cached_extent.epos.bh);
+		iinfo->cached_extent.lstart = -1;
+	}
+}
+
+/* Invalidate extent cache */
+static void udf_clear_extent_cache(struct inode *inode)
+{
+	struct udf_inode_info *iinfo = UDF_I(inode);
+
+	spin_lock(&iinfo->i_extent_cache_lock);
+	__udf_clear_extent_cache(inode);
+	spin_unlock(&iinfo->i_extent_cache_lock);
+}
+
+/* Return contents of extent cache */
+static int udf_read_extent_cache(struct inode *inode, loff_t bcount,
+				 loff_t *lbcount, struct extent_position *pos)
+{
+	struct udf_inode_info *iinfo = UDF_I(inode);
+	int ret = 0;
+
+	spin_lock(&iinfo->i_extent_cache_lock);
+	if ((iinfo->cached_extent.lstart <= bcount) &&
+	    (iinfo->cached_extent.lstart != -1)) {
+		/* Cache hit */
+		*lbcount = iinfo->cached_extent.lstart;
+		memcpy(pos, &iinfo->cached_extent.epos,
+		       sizeof(struct extent_position));
+		if (pos->bh)
+			get_bh(pos->bh);
+		ret = 1;
+	}
+	spin_unlock(&iinfo->i_extent_cache_lock);
+	return ret;
+}
+
+/* Add extent to extent cache */
+static void udf_update_extent_cache(struct inode *inode, loff_t estart,
+				    struct extent_position *pos, int next_epos)
+{
+	struct udf_inode_info *iinfo = UDF_I(inode);
+
+	spin_lock(&iinfo->i_extent_cache_lock);
+	/* Invalidate previously cached extent */
+	__udf_clear_extent_cache(inode);
+	if (pos->bh)
+		get_bh(pos->bh);
+	memcpy(&iinfo->cached_extent.epos, pos,
+	       sizeof(struct extent_position));
+	iinfo->cached_extent.lstart = estart;
+	if (next_epos)
+		switch (iinfo->i_alloc_type) {
+		case ICBTAG_FLAG_AD_SHORT:
+			iinfo->cached_extent.epos.offset -=
+					sizeof(struct short_ad);
+			break;
+		case ICBTAG_FLAG_AD_LONG:
+			iinfo->cached_extent.epos.offset -=
+					sizeof(struct long_ad);
+		}
+	spin_unlock(&iinfo->i_extent_cache_lock);
+}
+
 void udf_evict_inode(struct inode *inode)
 {
@@ -90,6 +158,7 @@ void udf_evict_inode(struct inode *inode)
 	}
 	kfree(iinfo->i_ext.i_data);
 	iinfo->i_ext.i_data = NULL;
+	udf_clear_extent_cache(inode);
 	if (want_delete) {
 		udf_free_inode(inode);
 	}
@@ -105,6 +174,7 @@ static void udf_write_failed(struct address_space *mapping, loff_t to)
 		truncate_pagecache(inode, to, isize);
 		if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
 			down_write(&iinfo->i_data_sem);
+			udf_clear_extent_cache(inode);
 			udf_truncate_extents(inode);
 			up_write(&iinfo->i_data_sem);
 		}
@@ -372,7 +442,7 @@ static int udf_get_block(struct inode *inode, sector_t block,
 		iinfo->i_next_alloc_goal++;
 	}
 
-
+	udf_clear_extent_cache(inode);
 	phys = inode_getblk(inode, block, &err, &new);
 	if (!phys)
 		goto abort;
@@ -1171,6 +1241,7 @@ int udf_setsize(struct inode *inode, loff_t newsize)
 	} else {
 		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 			down_write(&iinfo->i_data_sem);
+			udf_clear_extent_cache(inode);
 			memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize,
 			       0x00, bsize - newsize -
 			       udf_file_entry_alloc_offset(inode));
@@ -1184,6 +1255,7 @@ int udf_setsize(struct inode *inode, loff_t newsize)
 		if (err)
 			return err;
 		down_write(&iinfo->i_data_sem);
+		udf_clear_extent_cache(inode);
 		truncate_setsize(inode, newsize);
 		udf_truncate_extents(inode);
 		up_write(&iinfo->i_data_sem);
@@ -2156,11 +2228,12 @@ int8_t inode_bmap(struct inode *inode, sector_t block,
 	struct udf_inode_info *iinfo;
 
 	iinfo = UDF_I(inode);
-	pos->offset = 0;
-	pos->block = iinfo->i_location;
-	pos->bh = NULL;
+	if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
+		pos->offset = 0;
+		pos->block = iinfo->i_location;
+		pos->bh = NULL;
+	}
 	*elen = 0;
-
 	do {
 		etype = udf_next_aext(inode, pos, eloc, elen, 1);
 		if (etype == -1) {
@@ -2170,7 +2243,8 @@ int8_t inode_bmap(struct inode *inode, sector_t block,
 		}
 		lbcount += *elen;
 	} while (lbcount <= bcount);
-
+	/* update extent cache */
+	udf_update_extent_cache(inode, lbcount - *elen, pos, 1);
 	*offset = (bcount + *elen - lbcount) >> blocksize_bits;
 
 	return etype;
...
@@ -134,6 +134,8 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
 	ei->i_next_alloc_goal = 0;
 	ei->i_strat4096 = 0;
 	init_rwsem(&ei->i_data_sem);
+	ei->cached_extent.lstart = -1;
+	spin_lock_init(&ei->i_extent_cache_lock);
 
 	return &ei->vfs_inode;
 }
@@ -1021,7 +1023,6 @@ static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
 	if (bitmap == NULL)
 		return NULL;
 
-	bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1);
 	bitmap->s_nr_groups = nr_groups;
 	return bitmap;
 }
@@ -1079,8 +1080,6 @@ static int udf_fill_partdesc_info(struct super_block *sb,
 		if (!bitmap)
 			return 1;
 		map->s_uspace.s_bitmap = bitmap;
-		bitmap->s_extLength = le32_to_cpu(
-				phd->unallocSpaceBitmap.extLength);
 		bitmap->s_extPosition = le32_to_cpu(
 				phd->unallocSpaceBitmap.extPosition);
 		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
@@ -1115,8 +1114,6 @@ static int udf_fill_partdesc_info(struct super_block *sb,
 		if (!bitmap)
 			return 1;
 		map->s_fspace.s_bitmap = bitmap;
-		bitmap->s_extLength = le32_to_cpu(
-				phd->freedSpaceBitmap.extLength);
 		bitmap->s_extPosition = le32_to_cpu(
 				phd->freedSpaceBitmap.extPosition);
 		map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
@@ -1866,6 +1863,8 @@ static void udf_open_lvid(struct super_block *sb)
 	mark_buffer_dirty(bh);
 	sbi->s_lvid_dirty = 0;
 	mutex_unlock(&sbi->s_alloc_mutex);
+	/* Make opening of filesystem visible on the media immediately */
+	sync_dirty_buffer(bh);
 }
 
 static void udf_close_lvid(struct super_block *sb)
@@ -1906,6 +1905,8 @@ static void udf_close_lvid(struct super_block *sb)
 	mark_buffer_dirty(bh);
 	sbi->s_lvid_dirty = 0;
 	mutex_unlock(&sbi->s_alloc_mutex);
+	/* Make closing of filesystem visible on the media immediately */
+	sync_dirty_buffer(bh);
 }
 
 u64 lvid_get_unique_id(struct super_block *sb)
...
 #ifndef _UDF_I_H
 #define _UDF_I_H
 
+struct extent_position {
+	struct buffer_head *bh;
+	uint32_t offset;
+	struct kernel_lb_addr block;
+};
+
+struct udf_ext_cache {
+	/* Extent position */
+	struct extent_position epos;
+	/* Start logical offset in bytes */
+	loff_t lstart;
+};
+
 /*
  * The i_data_sem and i_mutex serve for protection of allocation information
  * of a regular files and symlinks. This includes all extents belonging to
@@ -35,6 +48,9 @@ struct udf_inode_info {
 		__u8 *i_data;
 	} i_ext;
 	struct rw_semaphore i_data_sem;
+	struct udf_ext_cache cached_extent;
+	/* Spinlock for protecting extent cache */
+	spinlock_t i_extent_cache_lock;
 	struct inode vfs_inode;
 };
...
@@ -80,10 +80,9 @@ struct udf_virtual_data {
 };
 
 struct udf_bitmap {
-	__u32			s_extLength;
 	__u32			s_extPosition;
-	__u16			s_nr_groups;
-	struct buffer_head	**s_block_bitmap;
+	int			s_nr_groups;
+	struct buffer_head	*s_block_bitmap[0];
 };
 
 struct udf_part_map {
...
@@ -113,11 +113,6 @@ struct ustr {
 	uint8_t u_len;
 };
 
-struct extent_position {
-	struct buffer_head *bh;
-	uint32_t offset;
-	struct kernel_lb_addr block;
-};
 
 /* super.c */
...
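
The UDF hunks above add a per-inode extent cache: inode_bmap() first consults udf_read_extent_cache(), and after a successful walk it records the reached position with udf_update_extent_cache(), so a sequential read does not rescan the extent list from the start of the file for every block. Below is a small, self-contained userspace sketch of that idea; the types and names are illustrative only, not the kernel's. In the actual patch the cached state also holds the buffer head of the on-disk allocation descriptor, is protected by a spinlock, and is invalidated whenever the extent list changes (truncate, setsize, block allocation).

/*
 * Userspace sketch of the extent-cache idea (illustrative names only):
 * remember where the previous lookup ended so that a later lookup at a
 * larger offset resumes from there instead of walking from extent 0.
 */
#include <stdio.h>

struct extent { long long len; };	/* length of one extent, in bytes */

struct extent_cache {
	long long lstart;	/* file offset where the cached extent starts, -1 = empty */
	size_t index;		/* index of that extent in the extent table */
};

/* Return the index of the extent that covers 'offset'. */
static size_t lookup_extent(const struct extent *ext, size_t n,
			    struct extent_cache *cache, long long offset)
{
	size_t i = 0;
	long long pos = 0;

	/* Cache hit: resume the walk from the remembered position. */
	if (cache->lstart != -1 && cache->lstart <= offset) {
		i = cache->index;
		pos = cache->lstart;
	}
	while (i < n && pos + ext[i].len <= offset) {
		pos += ext[i].len;
		i++;
	}
	cache->lstart = pos;	/* remember where the found extent starts */
	cache->index = i;
	return i;
}

int main(void)
{
	struct extent ext[] = { { 4096 }, { 8192 }, { 4096 } };
	struct extent_cache cache = { .lstart = -1 };

	/* The second, sequential lookup resumes from the cached position. */
	printf("offset 5000  -> extent %zu\n", lookup_extent(ext, 3, &cache, 5000));
	printf("offset 13000 -> extent %zu\n", lookup_extent(ext, 3, &cache, 13000));
	return 0;
}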