Commit 6ef4d6bf, authored by Evgeniy Dushistov, committed by Linus Torvalds

[PATCH] ufs: change block number on the fly

First of all, some necessary notes about UFS itself: to avoid wasting disk
space, the tail of a file is made up not of blocks (which are ordinarily
fairly big, usually 16K) but of fragments (ordinarily 2K).  As a file grows,
its tail occupies 1 fragment, then 2 fragments, and so on.  At some stage the
decision is made to allocate a whole block, and all the tail fragments are
moved into that one block.
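For concreteness, here is a small standalone sketch (not part of the patch)
that uses the typical sizes mentioned above (16K blocks, 2K fragments) to show
how many fragments a growing tail occupies:

	#include <stdio.h>

	/* Illustrative sizes only: 16K blocks, 2K fragments, as described above. */
	#define BLOCK_SIZE 16384UL
	#define FRAG_SIZE   2048UL

	int main(void)
	{
		unsigned long sizes[] = { 2048, 6000, 14000, 16384, 18000 };
		unsigned int i;

		for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
			/* bytes past the last full block, and the fragments they need */
			unsigned long tail  = sizes[i] % BLOCK_SIZE;
			unsigned long frags = (tail + FRAG_SIZE - 1) / FRAG_SIZE;

			if (tail == 0)
				printf("size %5lu: no tail, file ends on a block boundary\n",
				       sizes[i]);
			else
				printf("size %5lu: tail occupies %lu fragment(s)\n",
				       sizes[i], frags);
		}
		return 0;
	}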

How this situation was handled before:

  ufs_prepare_write
  ->block_prepare_write
    ->ufs_getfrag_block
      ->...
        ->ufs_new_fragments:

	bh = sb_bread
	bh->b_blocknr = result + i;
	mark_buffer_dirty (bh);

This is the wrong solution, because:

- it did not take into account that there is another cache: the inode page
  cache

- sb_getblk() does not use b_blocknr to locate a block (it uses page->index),
  so rewriting b_blocknr in place breaks sb_getblk (see the sketch below).
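An illustrative sketch of the lookup that this breaks, modelled on the block
device's page-cache lookup (__find_get_block_slow()); reference counting and
locking are deliberately omitted, so this is not the literal kernel code:

	/* The page index is computed from the requested block number,
	 * not read back from bh->b_blocknr. */
	static struct buffer_head *find_block_sketch(struct block_device *bdev,
						     sector_t block)
	{
		struct inode *bd_inode = bdev->bd_inode;
		pgoff_t index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
		struct page *page;
		struct buffer_head *bh, *head;

		page = find_get_page(bd_inode->i_mapping, index);
		if (!page || !page_has_buffers(page))
			return NULL;

		head = bh = page_buffers(page);
		do {
			/* A buffer whose b_blocknr was rewritten in place still
			 * sits on the page keyed by its old number, so it is
			 * never found here; a later sb_getblk() then creates a
			 * second, stale buffer for the new number. */
			if (bh->b_blocknr == block)
				return bh;
			bh = bh->b_this_page;
		} while (bh != head);

		return NULL;
	}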

How this situation is handled now: we walk the inode page cache; if a page is
not there, we read it into the cache, and then change b_blocknr on its buffers.
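As an illustration of when this reallocation is exercised (again a sketch, not
part of the patch; the mount point is hypothetical), it is enough to grow a
file's tail in fragment-sized steps on a UFS filesystem:

	/* Append 2K chunks so the tail grows by 1, 2, ... fragments and is
	 * eventually relocated into a whole 16K block.  "/mnt/ufs/tailfile"
	 * is a made-up path used only for this example. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		char chunk[2048];
		int i, fd;

		fd = open("/mnt/ufs/tailfile", O_CREAT | O_WRONLY | O_TRUNC, 0644);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		memset(chunk, 'x', sizeof(chunk));

		for (i = 0; i < 10; i++) {	/* 10 * 2K = 20K, past one 16K block */
			if (write(fd, chunk, sizeof(chunk)) != (ssize_t)sizeof(chunk)) {
				perror("write");
				close(fd);
				return 1;
			}
		}
		close(fd);
		return 0;
	}
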
Signed-off-by: Evgeniy Dushistov <dushistov@mail.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent c9a27b5d
@@ -39,7 +39,8 @@ static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *,
 /*
  * Free 'count' fragments from fragment number 'fragment'
  */
-void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) {
+void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
+{
 	struct super_block * sb;
 	struct ufs_sb_private_info * uspi;
 	struct ufs_super_block_first * usb1;
@@ -134,7 +135,8 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count
 /*
  * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
  */
-void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
+void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
+{
 	struct super_block * sb;
 	struct ufs_sb_private_info * uspi;
 	struct ufs_super_block_first * usb1;
@@ -222,15 +224,118 @@ void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
 	return;
 }
 
+static struct page *ufs_get_locked_page(struct address_space *mapping,
+					unsigned long index)
+{
+	struct page *page;
+
+try_again:
+	page = find_lock_page(mapping, index);
+	if (!page) {
+		page = read_cache_page(mapping, index,
+				       (filler_t*)mapping->a_ops->readpage,
+				       NULL);
+		if (IS_ERR(page)) {
+			printk(KERN_ERR "ufs_change_blocknr: "
+			       "read_cache_page error: ino %lu, index: %lu\n",
+			       mapping->host->i_ino, index);
+			goto out;
+		}
+
+		lock_page(page);
+
+		if (!PageUptodate(page) || PageError(page)) {
+			unlock_page(page);
+			page_cache_release(page);
+
+			printk(KERN_ERR "ufs_change_blocknr: "
+			       "can not read page: ino %lu, index: %lu\n",
+			       mapping->host->i_ino, index);
+
+			page = ERR_PTR(-EIO);
+			goto out;
+		}
+	}
+
+	if (unlikely(!page->mapping || !page_has_buffers(page))) {
+		unlock_page(page);
+		page_cache_release(page);
+		goto try_again;/*we really need these buffers*/
+	}
+out:
+	return page;
+}
+
+/*
+ * Modify inode page cache in such way:
+ * have - blocks with b_blocknr equal to oldb...oldb+count-1
+ * get - blocks with b_blocknr equal to newb...newb+count-1
+ * also we suppose that oldb...oldb+count-1 blocks
+ * situated at the end of file.
+ *
+ * We can come here from ufs_writepage or ufs_prepare_write,
+ * locked_page is argument of these functions, so we already lock it.
+ */
+static void ufs_change_blocknr(struct inode *inode, unsigned int count,
+			       unsigned int oldb, unsigned int newb,
+			       struct page *locked_page)
+{
+	unsigned int blk_per_page = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	sector_t baseblk;
+	struct address_space *mapping = inode->i_mapping;
+	pgoff_t index, cur_index = locked_page->index;
+	unsigned int i, j;
+	struct page *page;
+	struct buffer_head *head, *bh;
+
+	baseblk = ((i_size_read(inode) - 1) >> inode->i_blkbits) + 1 - count;
+
+	UFSD(("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
+	      inode->i_ino, count, oldb, newb));
+	BUG_ON(!PageLocked(locked_page));
+
+	for (i = 0; i < count; i += blk_per_page) {
+		index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+
+		if (likely(cur_index != index)) {
+			page = ufs_get_locked_page(mapping, index);
+			if (IS_ERR(page))
+				continue;
+		} else
+			page = locked_page;
+
+		j = i;
+		head = page_buffers(page);
+		bh = head;
+		do {
+			if (likely(bh->b_blocknr == j + oldb && j < count)) {
+				unmap_underlying_metadata(bh->b_bdev,
+							  bh->b_blocknr);
+				bh->b_blocknr = newb + j++;
+				mark_buffer_dirty(bh);
+			}
+
+			bh = bh->b_this_page;
+		} while (bh != head);
+
+		set_page_dirty(page);
+
+		if (likely(cur_index != index)) {
+			unlock_page(page);
+			page_cache_release(page);
+		}
+	}
+	UFSD(("EXIT\n"));
+}
+
-unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
-	unsigned goal, unsigned count, int * err )
+unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
+			   unsigned goal, unsigned count, int * err, struct page *locked_page)
 {
 	struct super_block * sb;
 	struct ufs_sb_private_info * uspi;
 	struct ufs_super_block_first * usb1;
-	struct buffer_head * bh;
-	unsigned cgno, oldcount, newcount, tmp, request, i, result;
+	unsigned cgno, oldcount, newcount, tmp, request, result;
 
 	UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))
@@ -343,24 +448,8 @@ unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
 	}
 	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
 	if (result) {
-		for (i = 0; i < oldcount; i++) {
-			bh = sb_bread(sb, tmp + i);
-			if(bh)
-			{
-				clear_buffer_dirty(bh);
-				bh->b_blocknr = result + i;
-				mark_buffer_dirty (bh);
-				if (IS_SYNC(inode))
-					sync_dirty_buffer(bh);
-				brelse (bh);
-			}
-			else
-			{
-				printk(KERN_ERR "ufs_new_fragments: bread fail\n");
-				unlock_super(sb);
-				return 0;
-			}
-		}
+		ufs_change_blocknr(inode, oldcount, tmp, result, locked_page);
+
 		*p = cpu_to_fs32(sb, result);
 		*err = 0;
 		inode->i_blocks += count << uspi->s_nspfshift;
...
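For reference, the index arithmetic in ufs_change_blocknr() above, worked
through with illustrative sizes (4K pages and 2K buffers are assumptions for
the example, not values taken from the patch); this is plain userspace
arithmetic, not kernel code:

	#include <stdio.h>

	int main(void)
	{
		unsigned int page_shift = 12, blkbits = 11;	/* 4K pages, 2K buffers (assumed) */
		unsigned long i_size = 20480;			/* a 20K file */
		unsigned int count = 8;				/* 8 buffers being relocated */
		unsigned int blk_per_page = 1u << (page_shift - blkbits);
		unsigned long baseblk = ((i_size - 1) >> blkbits) + 1 - count;
		unsigned int i;

		printf("blk_per_page = %u, baseblk = %lu\n", blk_per_page, baseblk);
		for (i = 0; i < count; i += blk_per_page)
			printf("file blocks %lu..%lu are on page index %lu\n",
			       baseblk + i, baseblk + i + blk_per_page - 1,
			       (baseblk + i) >> (page_shift - blkbits));
		return 0;
	}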
@@ -172,9 +172,10 @@ static void ufs_clear_block(struct inode *inode, struct buffer_head *bh)
 		sync_dirty_buffer(bh);
 }
 
-static struct buffer_head * ufs_inode_getfrag (struct inode *inode,
-	unsigned int fragment, unsigned int new_fragment,
-	unsigned int required, int *err, int metadata, long *phys, int *new)
+static struct buffer_head *ufs_inode_getfrag(struct inode *inode,
+	unsigned int fragment, unsigned int new_fragment,
+	unsigned int required, int *err, int metadata,
+	long *phys, int *new, struct page *locked_page)
 {
 	struct ufs_inode_info *ufsi = UFS_I(inode);
 	struct super_block * sb;
@@ -232,7 +233,8 @@ static struct buffer_head * ufs_inode_getfrag (struct inode *inode,
 		if (lastblockoff) {
 			p2 = ufsi->i_u1.i_data + lastblock;
 			tmp = ufs_new_fragments (inode, p2, lastfrag,
-				fs32_to_cpu(sb, *p2), uspi->s_fpb - lastblockoff, err);
+					fs32_to_cpu(sb, *p2), uspi->s_fpb - lastblockoff,
+					err, locked_page);
 			if (!tmp) {
 				if (lastfrag != ufsi->i_lastfrag)
 					goto repeat;
@@ -244,14 +246,16 @@ static struct buffer_head * ufs_inode_getfrag (struct inode *inode,
 		}
 		goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb;
 		tmp = ufs_new_fragments (inode, p, fragment - blockoff,
-			goal, required + blockoff, err);
+					 goal, required + blockoff,
+					 err, locked_page);
 	}
 	/*
 	 * We will extend last allocated block
 	 */
 	else if (lastblock == block) {
-		tmp = ufs_new_fragments (inode, p, fragment - (blockoff - lastblockoff),
-			fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), err);
+		tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff),
+					fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff),
+					err, locked_page);
 	}
 	/*
 	 * We will allocate new block before last allocated block
@@ -259,8 +263,8 @@ static struct buffer_head * ufs_inode_getfrag (struct inode *inode,
 	else /* (lastblock > block) */ {
 		if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1])))
 			goal = tmp + uspi->s_fpb;
-		tmp = ufs_new_fragments (inode, p, fragment - blockoff,
-			goal, uspi->s_fpb, err);
+		tmp = ufs_new_fragments(inode, p, fragment - blockoff,
+					goal, uspi->s_fpb, err, locked_page);
 	}
 	if (!tmp) {
 		if ((!blockoff && *p) ||
@@ -303,9 +307,10 @@ static struct buffer_head * ufs_inode_getfrag (struct inode *inode,
 	 */
 }
 
-static struct buffer_head * ufs_block_getfrag (struct inode *inode,
-	struct buffer_head *bh, unsigned int fragment, unsigned int new_fragment,
-	unsigned int blocksize, int * err, int metadata, long *phys, int *new)
+static struct buffer_head *ufs_block_getfrag(struct inode *inode, struct buffer_head *bh,
+	unsigned int fragment, unsigned int new_fragment,
+	unsigned int blocksize, int * err, int metadata,
+	long *phys, int *new, struct page *locked_page)
 {
 	struct super_block * sb;
 	struct ufs_sb_private_info * uspi;
@@ -350,7 +355,8 @@ static struct buffer_head * ufs_block_getfrag (struct inode *inode,
 		goal = tmp + uspi->s_fpb;
 	else
 		goal = bh->b_blocknr + uspi->s_fpb;
-	tmp = ufs_new_fragments (inode, p, ufs_blknum(new_fragment), goal, uspi->s_fpb, err);
+	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
+				uspi->s_fpb, err, locked_page);
 	if (!tmp) {
 		if (fs32_to_cpu(sb, *p))
 			goto repeat;
@@ -424,15 +430,15 @@ int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_hea
 	 * it much more readable:
 	 */
 #define GET_INODE_DATABLOCK(x) \
-	ufs_inode_getfrag(inode, x, fragment, 1, &err, 0, &phys, &new)
+	ufs_inode_getfrag(inode, x, fragment, 1, &err, 0, &phys, &new, bh_result->b_page)
 #define GET_INODE_PTR(x) \
-	ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, 1, NULL, NULL)
+	ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, 1, NULL, NULL, bh_result->b_page)
 #define GET_INDIRECT_DATABLOCK(x) \
 	ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
-			  &err, 0, &phys, &new);
+			  &err, 0, &phys, &new, bh_result->b_page);
 #define GET_INDIRECT_PTR(x) \
 	ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \
-			  &err, 1, NULL, NULL);
+			  &err, 1, NULL, NULL, bh_result->b_page);
 
 	if (ptr < UFS_NDIR_FRAGMENT) {
 		bh = GET_INODE_DATABLOCK(ptr);
...
@@ -875,7 +875,8 @@ struct ufs_super_block_third {
 /* balloc.c */
 extern void ufs_free_fragments (struct inode *, unsigned, unsigned);
 extern void ufs_free_blocks (struct inode *, unsigned, unsigned);
-extern unsigned ufs_new_fragments (struct inode *, __fs32 *, unsigned, unsigned, unsigned, int *);
+extern unsigned ufs_new_fragments(struct inode *, __fs32 *, unsigned, unsigned,
+				  unsigned, int *, struct page *);
 
 /* cylinder.c */
 extern struct ufs_cg_private_info * ufs_load_cylinder (struct super_block *, unsigned);
...