Commit b47b2478 authored by Mingming Cao, committed by Linus Torvalds

[PATCH] ext3_get_blocks: multiple block allocation

Add support for multiple block allocation in ext3_get_blocks().

Look up the disk block mapping and count the total number of blocks to
allocate, then pass that count to ext3_new_blocks(), where the real block
allocation is performed.  Once multiple blocks are allocated, prepare the
branch with the just-allocated block numbers and finally splice the whole
branch into the block mapping tree.
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 89747d36
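Before the diff itself, a minimal user-space sketch of the counting step described above may help: given how many indirect blocks are still missing on the path (k), how many data blocks the caller wants mapped (blks), and how many slots remain before the boundary of the current indirect block (blocks_to_boundary), it decides how many blocks one allocation pass should request. The helper name blks_to_allocate, the plain uint32_t map and the sample values are illustrative assumptions only; the real logic is ext3_blks_to_allocate() in the patch below.

/*
 * Minimal sketch (not kernel code): decide how many blocks a single
 * allocation pass should request.  k is the number of indirect blocks
 * still missing on the path, blks the number of data blocks wanted,
 * blocks_to_boundary the slots left in the current indirect block.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned long blks_to_allocate(const uint32_t *map, int k,
				      unsigned long blks, int blocks_to_boundary)
{
	unsigned long count = 0;

	if (k > 0) {
		/* indirect block(s) missing: every slot is free, just
		 * avoid crossing the indirect-block boundary */
		if (blks < (unsigned long)blocks_to_boundary + 1)
			return blks;
		return (unsigned long)blocks_to_boundary + 1;
	}

	/* indirect block exists: extend only over slots that are
	 * still unmapped and still inside the boundary */
	count++;
	while (count < blks && count <= (unsigned long)blocks_to_boundary &&
	       map[count] == 0)
		count++;
	return count;
}

int main(void)
{
	/* fake indirect block: slot 0 is the one being mapped now,
	 * slots 1-2 are free, slot 3 already points at block 9001 */
	uint32_t map[8] = { 0, 0, 0, 9001, 0, 0, 0, 0 };

	printf("%lu\n", blks_to_allocate(map, 0, 6, 7));	/* prints 3 */
	printf("%lu\n", blks_to_allocate(map, 2, 6, 3));	/* prints 4 */
	return 0;
}

The second call shows the clamp: when indirect blocks are missing, the request never crosses into the next indirect block, matching the "don't handle cross boundary allocation" note in the patch.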
@@ -235,16 +235,6 @@ void ext3_delete_inode (struct inode * inode)
 	clear_inode(inode);	/* We must guarantee clearing of inode... */
 }
 
-static int ext3_alloc_block (handle_t *handle,
-			struct inode * inode, unsigned long goal, int *err)
-{
-	unsigned long result;
-
-	result = ext3_new_block(handle, inode, goal, err);
-	return result;
-}
-
 typedef struct {
 	__le32	*p;
 	__le32	key;
@@ -476,15 +466,115 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
 	return ext3_find_near(inode, partial);
 }
 
+/**
+ *	ext3_blks_to_allocate: Look up the block map and count the number
+ *	of direct blocks need to be allocated for the given branch.
+ *
+ *	@branch: chain of indirect blocks
+ *	@k: number of blocks need for indirect blocks
+ *	@blks: number of data blocks to be mapped.
+ *	@blocks_to_boundary: the offset in the indirect block
+ *
+ *	return the total number of blocks to be allocate, including the
+ *	direct and indirect blocks.
+ */
+static int
+ext3_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
+		int blocks_to_boundary)
+{
+	unsigned long count = 0;
+
+	/*
+	 * Simple case, [t,d]Indirect block(s) has not allocated yet
+	 * then it's clear blocks on that path have not allocated
+	 */
+	if (k > 0) {
+		/* right now don't hanel cross boundary allocation */
+		if (blks < blocks_to_boundary + 1)
+			count += blks;
+		else
+			count += blocks_to_boundary + 1;
+		return count;
+	}
+
+	count++;
+	while (count < blks && count <= blocks_to_boundary &&
+		le32_to_cpu(*(branch[0].p + count)) == 0) {
+		count++;
+	}
+	return count;
+}
+
+/**
+ *	ext3_alloc_blocks: multiple allocate blocks needed for a branch
+ *	@indirect_blks: the number of blocks need to allocate for indirect
+ *			blocks
+ *
+ *	@new_blocks: on return it will store the new block numbers for
+ *	the indirect blocks(if needed) and the first direct block,
+ *	@blks: on return it will store the total number of allocated
+ *		direct blocks
+ */
+static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
+			unsigned long goal, int indirect_blks, int blks,
+			unsigned long long new_blocks[4], int *err)
+{
+	int target, i;
+	unsigned long count = 0;
+	int index = 0;
+	unsigned long current_block = 0;
+	int ret = 0;
+
+	/*
+	 * Here we try to allocate the requested multiple blocks at once,
+	 * on a best-effort basis.
+	 * To build a branch, we should allocate blocks for
+	 * the indirect blocks(if not allocated yet), and at least
+	 * the first direct block of this branch.  That's the
+	 * minimum number of blocks need to allocate(required)
+	 */
+	target = blks + indirect_blks;
+
+	while (1) {
+		count = target;
+		/* allocating blocks for indirect blocks and direct blocks */
+		current_block = ext3_new_blocks(handle, inode, goal, &count, err);
+		if (*err)
+			goto failed_out;
+
+		target -= count;
+		/* allocate blocks for indirect blocks */
+		while (index < indirect_blks && count) {
+			new_blocks[index++] = current_block++;
+			count--;
+		}
+
+		if (count > 0)
+			break;
+	}
+
+	/* save the new block number for the first direct block */
+	new_blocks[index] = current_block;
+
+	/* total number of blocks allocated for direct blocks */
+	ret = count;
+	*err = 0;
+	return ret;
+failed_out:
+	for (i = 0; i <index; i++)
+		ext3_free_blocks(handle, inode, new_blocks[i], 1);
+	return ret;
+}
+
 /**
  *	ext3_alloc_branch - allocate and set up a chain of blocks.
  *	@inode: owner
- *	@num: depth of the chain (number of blocks to allocate)
+ *	@indirect_blks: number of allocated indirect blocks
+ *	@blks: number of allocated direct blocks
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
- *	This function allocates @num blocks, zeroes out all but the last one,
+ *	This function allocates blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode. It stores the information about that chain in the branch[], in
@@ -503,71 +593,79 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
  */
 static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
-			int num,
-			unsigned long goal,
-			int *offsets,
-			Indirect *branch)
+			int indirect_blks, int *blks, unsigned long goal,
+			int *offsets, Indirect *branch)
 {
 	int blocksize = inode->i_sb->s_blocksize;
-	int n = 0, keys = 0;
+	int i, n = 0;
 	int err = 0;
-	int i;
-	int parent = ext3_alloc_block(handle, inode, goal, &err);
-
-	branch[0].key = cpu_to_le32(parent);
-	if (parent) {
-		for (n = 1; n < num; n++) {
-			struct buffer_head *bh;
-			/* Allocate the next block */
-			int nr = ext3_alloc_block(handle, inode, parent, &err);
-			if (!nr)
-				break;
-			branch[n].key = cpu_to_le32(nr);
-
-			/*
-			 * Get buffer_head for parent block, zero it out
-			 * and set the pointer to new one, then send
-			 * parent to disk.
-			 */
-			bh = sb_getblk(inode->i_sb, parent);
-			if (!bh)
-				break;
-			keys = n+1;
-			branch[n].bh = bh;
-			lock_buffer(bh);
-			BUFFER_TRACE(bh, "call get_create_access");
-			err = ext3_journal_get_create_access(handle, bh);
-			if (err) {
-				unlock_buffer(bh);
-				brelse(bh);
-				break;
-			}
-
-			memset(bh->b_data, 0, blocksize);
-			branch[n].p = (__le32*) bh->b_data + offsets[n];
-			*branch[n].p = branch[n].key;
-			BUFFER_TRACE(bh, "marking uptodate");
-			set_buffer_uptodate(bh);
-			unlock_buffer(bh);
-
-			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			err = ext3_journal_dirty_metadata(handle, bh);
-			if (err)
-				break;
-
-			parent = nr;
-		}
-	}
-	if (n == num)
-		return 0;
-
+	struct buffer_head *bh;
+	int num;
+	unsigned long long new_blocks[4];
+	unsigned long long current_block;
+
+	num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
+				*blks, new_blocks, &err);
+	if (err)
+		return err;
+
+	branch[0].key = cpu_to_le32(new_blocks[0]);
+	/*
+	 * metadata blocks and data blocks are allocated.
+	 */
+	for (n = 1; n <= indirect_blks; n++) {
+		/*
+		 * Get buffer_head for parent block, zero it out
+		 * and set the pointer to new one, then send
+		 * parent to disk.
+		 */
+		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+		branch[n].bh = bh;
+		lock_buffer(bh);
+		BUFFER_TRACE(bh, "call get_create_access");
+		err = ext3_journal_get_create_access(handle, bh);
+		if (err) {
+			unlock_buffer(bh);
+			brelse(bh);
+			goto failed;
+		}
+
+		memset(bh->b_data, 0, blocksize);
+		branch[n].p = (__le32 *) bh->b_data + offsets[n];
+		branch[n].key = cpu_to_le32(new_blocks[n]);
+		*branch[n].p = branch[n].key;
+		if ( n == indirect_blks) {
+			current_block = new_blocks[n];
+			/*
+			 * End of chain, update the last new metablock of
+			 * the chain to point to the new allocated
+			 * data blocks numbers
+			 */
+			for (i=1; i < num; i++)
+				*(branch[n].p + i) = cpu_to_le32(++current_block);
+		}
+		BUFFER_TRACE(bh, "marking uptodate");
+		set_buffer_uptodate(bh);
+		unlock_buffer(bh);
+
+		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+		err = ext3_journal_dirty_metadata(handle, bh);
+		if (err)
+			goto failed;
+	}
+	*blks = num;
+	return err;
+failed:
 	/* Allocation failed, free what we already allocated */
-	for (i = 1; i < keys; i++) {
+	for (i = 1; i <= n ; i++) {
 		BUFFER_TRACE(branch[i].bh, "call journal_forget");
 		ext3_journal_forget(handle, branch[i].bh);
 	}
-	for (i = 0; i < keys; i++)
-		ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
+	for (i = 0; i <indirect_blks; i++)
+		ext3_free_blocks(handle, inode, new_blocks[i], 1);
+
+	ext3_free_blocks(handle, inode, new_blocks[i], num);
+
 	return err;
 }
@@ -578,7 +676,8 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
 *	@chain: chain of indirect blocks (with a missing link - see
 *		ext3_alloc_branch)
 *	@where: location of missing link
- *	@num: number of blocks we are adding
+ *	@num: number of indirect blocks we are adding
+ *	@blks: number of direct blocks we are adding
 *
 *	This function fills the missing link and does all housekeeping needed in
 *	inode (->i_blocks, etc.). In case of success we end up with the full
@@ -586,12 +685,12 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
  */
 static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
-			Indirect chain[4], Indirect *where, int num)
+			Indirect *where, int num, int blks)
 {
 	int i;
 	int err = 0;
 	struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;
+	unsigned long current_block;
 
 	/*
 	 * If we're splicing into a [td]indirect block (as opposed to the
 	 * inode) then we need to get write access to the [td]indirect block
@@ -606,6 +705,13 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
 	/* That's it */
 
 	*where->p = where->key;
 
+	/* update host bufferhead or inode to point to
+	 * more just allocated direct blocks blocks */
+	if (num == 0 && blks > 1) {
+		current_block = le32_to_cpu(where->key + 1);
+		for (i = 1; i < blks; i++)
+			*(where->p + i ) = cpu_to_le32(current_block++);
+	}
+
 	/*
 	 * update the most recently allocated logical & physical block
@@ -613,8 +719,8 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
 	 * allocation
 	 */
 	if (block_i) {
-		block_i->last_alloc_logical_block = block;
-		block_i->last_alloc_physical_block = le32_to_cpu(where[num-1].key);
+		block_i->last_alloc_logical_block = block + blks - 1;
+		block_i->last_alloc_physical_block = le32_to_cpu(where[num].key + blks - 1);
 	}
 
 	/* We are done with atomic stuff, now do the rest of housekeeping */
@@ -647,10 +753,13 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
 	return err;
 
 err_out:
-	for (i = 1; i < num; i++) {
+	for (i = 1; i <= num; i++) {
 		BUFFER_TRACE(where[i].bh, "call journal_forget");
 		ext3_journal_forget(handle, where[i].bh);
-		ext3_free_blocks(handle, inode, le32_to_cpu(where[i-1].key), 1);
 	}
+	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
+
 	return err;
 }
@@ -684,7 +793,7 @@ ext3_get_blocks_handle(handle_t *handle, struct inode *inode, sector_t iblock,
 	Indirect chain[4];
 	Indirect *partial;
 	unsigned long goal;
-	int left;
+	int indirect_blks;
 	int blocks_to_boundary = 0;
 	int depth;
 	struct ext3_inode_info *ei = EXT3_I(inode);
@@ -772,12 +881,19 @@ ext3_get_blocks_handle(handle_t *handle, struct inode *inode, sector_t iblock,
 	goal = ext3_find_goal(inode, iblock, chain, partial);
 
-	left = (chain + depth) - partial;
+	/* the number of blocks need to allocate for [d,t]indirect blocks */
+	indirect_blks = (chain + depth) - partial - 1;
+
+	/*
+	 * Next look up the indirect map to count the totoal number of
+	 * direct blocks to allocate for this branch.
+	 */
+	count = ext3_blks_to_allocate(partial, indirect_blks,
+					maxblocks, blocks_to_boundary);
 	/*
 	 * Block out ext3_truncate while we alter the tree
 	 */
-	err = ext3_alloc_branch(handle, inode, left, goal,
+	err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
 				offsets + (partial - chain), partial);
 
 	/*
@@ -788,8 +904,8 @@ ext3_get_blocks_handle(handle_t *handle, struct inode *inode, sector_t iblock,
 	 * may need to return -EAGAIN upwards in the worst case. --sct
 	 */
 	if (!err)
-		err = ext3_splice_branch(handle, inode, iblock, chain,
-					partial, left);
+		err = ext3_splice_branch(handle, inode, iblock,
+					partial, indirect_blks, count);
 
 	/*
 	 * i_disksize growing is protected by truncate_mutex. Don't forget to
 	 * protect it if you're about to implement concurrent
@@ -824,8 +940,8 @@ ext3_get_blocks_handle(handle_t *handle, struct inode *inode, sector_t iblock,
 
 static int
 ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
-			unsigned long max_blocks, struct buffer_head *bh_result,
-			int create)
+			unsigned long max_blocks,
+			struct buffer_head *bh_result, int create)
 {
 	handle_t *handle = journal_current_handle();
 	int ret = 0;
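To make the best-effort loop in ext3_alloc_blocks() above concrete, here is a small user-space sketch of the same retry pattern, under the assumption of a toy allocator fake_new_blocks() that hands out at most two contiguous blocks per call (that helper and its block numbers are invented for illustration, not part of the patch): missing indirect blocks are satisfied first, and the loop only stops once at least one direct block has been obtained on top of them.

/*
 * Sketch (not kernel code) of the best-effort retry loop: the fake
 * allocator below returns at most two contiguous blocks per call, so
 * several passes may be needed before the missing indirect blocks and
 * at least one direct block have all been obtained.
 */
#include <stdio.h>

static unsigned long long next_free = 1000;	/* invented block numbers */

static unsigned long long fake_new_blocks(unsigned long *count)
{
	if (*count > 2)
		*count = 2;			/* simulate fragmentation */
	unsigned long long start = next_free;
	next_free += *count;
	return start;
}

int main(void)
{
	int indirect_blks = 2;			/* missing [td]indirect blocks */
	int blks = 5;				/* data blocks the caller wants */
	unsigned long long new_blocks[4];
	int index = 0;
	unsigned long count = 0;
	unsigned long target = blks + indirect_blks;

	while (1) {
		count = target;
		unsigned long long current = fake_new_blocks(&count);
		target -= count;

		/* indirect blocks are filled before any block counts as data */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current++;
			count--;
		}
		if (count > 0) {
			/* at least one direct block obtained: done */
			new_blocks[index] = current;
			break;
		}
	}

	printf("indirect blocks %llu %llu, first data block %llu, "
	       "%lu direct block(s) from the last pass\n",
	       new_blocks[0], new_blocks[1], new_blocks[2], count);
	return 0;
}

Only the direct blocks obtained in the final pass are reported back, which matches how ext3_alloc_branch() stores the returned count into *blks in the patch above.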