Commit 40f76580 authored by Chris Mason

Btrfs: split up __extent_writepage to lower stack usage

__extent_writepage has two unrelated parts.  First it does the delayed
allocation dance and second it does the mapping and IO for the page
we're actually writing.

This splits it up into those two parts so the stack from one doesn't
impact the stack from the other.
Signed-off-by: Chris Mason <clm@fb.com>
Parent: fc4adbff
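
As background for the diff below: the patch leans on the kernel's noinline_for_stack annotation so that the two halves of __extent_writepage each get their own stack frame instead of being inlined back into one large frame. The following is a standalone, hypothetical user-space sketch of that pattern, not code from the patch; do_phase_setup, do_phase_io and demo_writepage are made-up names, and in the kernel the macro comes from the compiler headers rather than being defined by hand.

```c
#include <stdio.h>

/* In the kernel this macro is provided by the compiler headers. */
#define noinline_for_stack __attribute__((noinline))

/* First phase: its large locals live only while this frame is active. */
static noinline_for_stack int do_phase_setup(int page)
{
        char scratch_a[256] = { 0 };

        scratch_a[0] = (char)page;
        return scratch_a[0] ? 0 : 1;    /* 1 would mean "all work done here" */
}

/* Second phase: its locals never coexist with do_phase_setup()'s. */
static noinline_for_stack int do_phase_io(int page)
{
        char scratch_b[256] = { 0 };

        scratch_b[0] = (char)page;
        return scratch_b[0] ? 0 : -1;
}

/* The caller keeps only a tiny frame of its own. */
static int demo_writepage(int page)
{
        int ret = do_phase_setup(page);

        if (ret)                        /* setup finished the work (or failed) */
                return ret < 0 ? ret : 0;
        return do_phase_io(page);
}

int main(void)
{
        printf("demo_writepage(1) = %d\n", demo_writepage(1));
        return 0;
}
```

Because neither helper can be inlined into demo_writepage, the 256-byte buffers are never on the stack at the same time, which is exactly the effect the patch is after.
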
@@ -3101,143 +3101,130 @@ static noinline void update_nr_written(struct page *page,
 }
 
 /*
- * the writepage semantics are similar to regular writepage.  extent
- * records are inserted to lock ranges in the tree, and as dirty areas
- * are found, they are marked writeback.  Then the lock bits are removed
- * and the end_io handler clears the writeback ranges
+ * helper for __extent_writepage, doing all of the delayed allocation setup.
+ *
+ * This returns 1 if our fill_delalloc function did all the work required
+ * to write the page (copy into inline extent).  In this case the IO has
+ * been started and the page is already unlocked.
+ *
+ * This returns 0 if all went well (page still locked)
+ * This returns < 0 if there were errors (page still locked)
  */
-static int __extent_writepage(struct page *page, struct writeback_control *wbc,
-                              void *data)
+static noinline_for_stack int writepage_delalloc(struct inode *inode,
+                              struct page *page, struct writeback_control *wbc,
+                              struct extent_page_data *epd,
+                              u64 delalloc_start,
+                              unsigned long *nr_written)
+{
+        struct extent_io_tree *tree = epd->tree;
+        u64 page_end = delalloc_start + PAGE_CACHE_SIZE - 1;
+        u64 nr_delalloc;
+        u64 delalloc_to_write = 0;
+        u64 delalloc_end = 0;
+        int ret;
+        int page_started = 0;
+
+        if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc)
+                return 0;
+
+        while (delalloc_end < page_end) {
+                nr_delalloc = find_lock_delalloc_range(inode, tree,
+                                               page,
+                                               &delalloc_start,
+                                               &delalloc_end,
+                                               128 * 1024 * 1024);
+                if (nr_delalloc == 0) {
+                        delalloc_start = delalloc_end + 1;
+                        continue;
+                }
+                ret = tree->ops->fill_delalloc(inode, page,
+                                               delalloc_start,
+                                               delalloc_end,
+                                               &page_started,
+                                               nr_written);
+                /* File system has been set read-only */
+                if (ret) {
+                        SetPageError(page);
+                        /* fill_delalloc should be return < 0 for error
+                         * but just in case, we use > 0 here meaning the
+                         * IO is started, so we don't want to return > 0
+                         * unless things are going well.
+                         */
+                        ret = ret < 0 ? ret : -EIO;
+                        goto done;
+                }
+                /*
+                 * delalloc_end is already one less than the total
+                 * length, so we don't subtract one from
+                 * PAGE_CACHE_SIZE
+                 */
+                delalloc_to_write += (delalloc_end - delalloc_start +
+                                      PAGE_CACHE_SIZE) >>
+                                      PAGE_CACHE_SHIFT;
+                delalloc_start = delalloc_end + 1;
+        }
+        if (wbc->nr_to_write < delalloc_to_write) {
+                int thresh = 8192;
+
+                if (delalloc_to_write < thresh * 2)
+                        thresh = delalloc_to_write;
+                wbc->nr_to_write = min_t(u64, delalloc_to_write,
+                                         thresh);
+        }
+
+        /* did the fill delalloc function already unlock and start
+         * the IO?
+         */
+        if (page_started) {
+                /*
+                 * we've unlocked the page, so we can't update
+                 * the mapping's writeback index, just update
+                 * nr_to_write.
+                 */
+                wbc->nr_to_write -= *nr_written;
+                return 1;
+        }
+
+        ret = 0;
+
+done:
+        return ret;
+}
+
+/*
+ * helper for __extent_writepage.  This calls the writepage start hooks,
+ * and does the loop to map the page into extents and bios.
+ *
+ * We return 1 if the IO is started and the page is unlocked,
+ * 0 if all went well (page still locked)
+ * < 0 if there were errors (page still locked)
+ */
+static noinline_for_stack int __extent_writepage_io(struct inode *inode,
+                                 struct page *page,
+                                 struct writeback_control *wbc,
+                                 struct extent_page_data *epd,
+                                 loff_t i_size,
+                                 unsigned long nr_written,
+                                 int write_flags, int *nr_ret)
 {
-        struct inode *inode = page->mapping->host;
-        struct extent_page_data *epd = data;
         struct extent_io_tree *tree = epd->tree;
         u64 start = page_offset(page);
-        u64 delalloc_start;
         u64 page_end = start + PAGE_CACHE_SIZE - 1;
         u64 end;
         u64 cur = start;
         u64 extent_offset;
-        u64 last_byte = i_size_read(inode);
         u64 block_start;
         u64 iosize;
         sector_t sector;
         struct extent_state *cached_state = NULL;
         struct extent_map *em;
         struct block_device *bdev;
-        int ret;
-        int nr = 0;
         size_t pg_offset = 0;
         size_t blocksize;
-        loff_t i_size = i_size_read(inode);
-        unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
-        u64 nr_delalloc;
-        u64 delalloc_end;
-        int page_started;
-        int compressed;
-        int write_flags;
-        unsigned long nr_written = 0;
-        bool fill_delalloc = true;
+        int ret = 0;
+        int nr = 0;
+        bool compressed;
-
-        if (wbc->sync_mode == WB_SYNC_ALL)
-                write_flags = WRITE_SYNC;
-        else
-                write_flags = WRITE;
-
-        trace___extent_writepage(page, inode, wbc);
-
-        WARN_ON(!PageLocked(page));
-
-        ClearPageError(page);
-
-        pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
-        if (page->index > end_index ||
-            (page->index == end_index && !pg_offset)) {
-                page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
-                unlock_page(page);
-                return 0;
-        }
-
-        if (page->index == end_index) {
-                char *userpage;
-
-                userpage = kmap_atomic(page);
-                memset(userpage + pg_offset, 0,
-                       PAGE_CACHE_SIZE - pg_offset);
-                kunmap_atomic(userpage);
-                flush_dcache_page(page);
-        }
-        pg_offset = 0;
-
-        set_page_extent_mapped(page);
-
-        if (!tree->ops || !tree->ops->fill_delalloc)
-                fill_delalloc = false;
-
-        delalloc_start = start;
-        delalloc_end = 0;
-        page_started = 0;
-        if (!epd->extent_locked && fill_delalloc) {
-                u64 delalloc_to_write = 0;
-                /*
-                 * make sure the wbc mapping index is at least updated
-                 * to this page.
-                 */
-                update_nr_written(page, wbc, 0);
-
-                while (delalloc_end < page_end) {
-                        nr_delalloc = find_lock_delalloc_range(inode, tree,
-                                                       page,
-                                                       &delalloc_start,
-                                                       &delalloc_end,
-                                                       128 * 1024 * 1024);
-                        if (nr_delalloc == 0) {
-                                delalloc_start = delalloc_end + 1;
-                                continue;
-                        }
-                        ret = tree->ops->fill_delalloc(inode, page,
-                                                       delalloc_start,
-                                                       delalloc_end,
-                                                       &page_started,
-                                                       &nr_written);
-                        /* File system has been set read-only */
-                        if (ret) {
-                                SetPageError(page);
-                                goto done;
-                        }
-                        /*
-                         * delalloc_end is already one less than the total
-                         * length, so we don't subtract one from
-                         * PAGE_CACHE_SIZE
-                         */
-                        delalloc_to_write += (delalloc_end - delalloc_start +
-                                              PAGE_CACHE_SIZE) >>
-                                              PAGE_CACHE_SHIFT;
-                        delalloc_start = delalloc_end + 1;
-                }
-                if (wbc->nr_to_write < delalloc_to_write) {
-                        int thresh = 8192;
-
-                        if (delalloc_to_write < thresh * 2)
-                                thresh = delalloc_to_write;
-                        wbc->nr_to_write = min_t(u64, delalloc_to_write,
-                                                 thresh);
-                }
-
-                /* did the fill delalloc function already unlock and start
-                 * the IO?
-                 */
-                if (page_started) {
-                        ret = 0;
-                        /*
-                         * we've unlocked the page, so we can't update
-                         * the mapping's writeback index, just update
-                         * nr_to_write.
-                         */
-                        wbc->nr_to_write -= nr_written;
-                        goto done_unlocked;
-                }
-        }
         if (tree->ops && tree->ops->writepage_start_hook) {
                 ret = tree->ops->writepage_start_hook(page, start,
                                                       page_end);
@@ -3247,9 +3234,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                                 wbc->pages_skipped++;
                         else
                                 redirty_page_for_writepage(wbc, page);
+
                         update_nr_written(page, wbc, nr_written);
                         unlock_page(page);
-                        ret = 0;
+                        ret = 1;
                         goto done_unlocked;
                 }
         }
@@ -3261,7 +3249,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
         update_nr_written(page, wbc, nr_written + 1);
 
         end = page_end;
-        if (last_byte <= start) {
+        if (i_size <= start) {
                 if (tree->ops && tree->ops->writepage_end_io_hook)
                         tree->ops->writepage_end_io_hook(page, start,
                                                          page_end, NULL, 1);
@@ -3271,7 +3259,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
         blocksize = inode->i_sb->s_blocksize;
 
         while (cur <= end) {
-                if (cur >= last_byte) {
+                u64 em_end;
+                if (cur >= i_size) {
                         if (tree->ops && tree->ops->writepage_end_io_hook)
                                 tree->ops->writepage_end_io_hook(page, cur,
                                                                  page_end, NULL, 1);
@@ -3286,9 +3275,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                 }
 
                 extent_offset = cur - em->start;
-                BUG_ON(extent_map_end(em) <= cur);
+                em_end = extent_map_end(em);
+                BUG_ON(em_end <= cur);
                 BUG_ON(end < cur);
-                iosize = min(extent_map_end(em) - cur, end - cur + 1);
+                iosize = min(em_end - cur, end - cur + 1);
                 iosize = ALIGN(iosize, blocksize);
                 sector = (em->block_start + extent_offset) >> 9;
                 bdev = em->bdev;
@@ -3324,13 +3314,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                         pg_offset += iosize;
                         continue;
                 }
-                /* leave this out until we have a page_mkwrite call */
-                if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
-                                   EXTENT_DIRTY, 0, NULL)) {
-                        cur = cur + iosize;
-                        pg_offset += iosize;
-                        continue;
-                }
 
                 if (tree->ops && tree->ops->writepage_io_hook) {
                         ret = tree->ops->writepage_io_hook(page, cur,
@@ -3341,7 +3324,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                 if (ret) {
                         SetPageError(page);
                 } else {
-                        unsigned long max_nr = end_index + 1;
+                        unsigned long max_nr = (i_size >> PAGE_CACHE_SHIFT) + 1;
 
                         set_range_writeback(tree, cur, cur + iosize - 1);
                         if (!PageWriteback(page)) {
@@ -3362,6 +3345,81 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                 pg_offset += iosize;
                 nr++;
         }
+done:
+        *nr_ret = nr;
+
+done_unlocked:
+        /* drop our reference on any cached states */
+        free_extent_state(cached_state);
+        return ret;
+}
+
+/*
+ * the writepage semantics are similar to regular writepage.  extent
+ * records are inserted to lock ranges in the tree, and as dirty areas
+ * are found, they are marked writeback.  Then the lock bits are removed
+ * and the end_io handler clears the writeback ranges
+ */
+static int __extent_writepage(struct page *page, struct writeback_control *wbc,
+                              void *data)
+{
+        struct inode *inode = page->mapping->host;
+        struct extent_page_data *epd = data;
+        u64 start = page_offset(page);
+        u64 page_end = start + PAGE_CACHE_SIZE - 1;
+        int ret;
+        int nr = 0;
+        size_t pg_offset = 0;
+        loff_t i_size = i_size_read(inode);
+        unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
+        int write_flags;
+        unsigned long nr_written = 0;
+
+        if (wbc->sync_mode == WB_SYNC_ALL)
+                write_flags = WRITE_SYNC;
+        else
+                write_flags = WRITE;
+
+        trace___extent_writepage(page, inode, wbc);
+
+        WARN_ON(!PageLocked(page));
+
+        ClearPageError(page);
+
+        pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
+        if (page->index > end_index ||
+            (page->index == end_index && !pg_offset)) {
+                page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+                unlock_page(page);
+                return 0;
+        }
+
+        if (page->index == end_index) {
+                char *userpage;
+
+                userpage = kmap_atomic(page);
+                memset(userpage + pg_offset, 0,
+                       PAGE_CACHE_SIZE - pg_offset);
+                kunmap_atomic(userpage);
+                flush_dcache_page(page);
+        }
+
+        pg_offset = 0;
+
+        set_page_extent_mapped(page);
+
+        ret = writepage_delalloc(inode, page, wbc, epd, start, &nr_written);
+        if (ret == 1)
+                goto done_unlocked;
+        if (ret)
+                goto done;
+
+        ret = __extent_writepage_io(inode, page, wbc, epd,
+                                    i_size, nr_written, write_flags, &nr);
+        if (ret == 1)
+                goto done_unlocked;
+
 done:
         if (nr == 0) {
                 /* make sure the mapping tag for page dirty gets cleared */
@@ -3373,12 +3431,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                 end_extent_writepage(page, ret, start, page_end);
         }
         unlock_page(page);
+        return ret;
 
 done_unlocked:
-
-        /* drop our reference on any cached states */
-        free_extent_state(cached_state);
-        return ret;
+        return 0;
 }
 
 static int eb_wait(void *word)
@@ -125,7 +125,7 @@ static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
  * the btree.  The caller should have done a btrfs_drop_extents so that
  * no overlapping inline items exist in the btree
  */
-static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
+static int insert_inline_extent(struct btrfs_trans_handle *trans,
                                 struct btrfs_path *path, int extent_inserted,
                                 struct btrfs_root *root, struct inode *inode,
                                 u64 start, size_t size, size_t compressed_size,
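
After the split, the two new helpers share the return convention spelled out in their comments: 1 means the helper already started the IO and unlocked the page, 0 means success with the page still locked, and a negative value is an error with the page still locked. Below is a minimal, hypothetical sketch of a caller dispatching on that convention; phase_one and writepage_like_caller are made-up stand-ins, not code from the patch.

```c
#include <stdio.h>

/* Hypothetical stand-in for a helper such as writepage_delalloc(). */
static int phase_one(int outcome)
{
        return outcome;                 /* 1, 0 or negative, per the convention */
}

/* Hypothetical caller mirroring the shape of the new __extent_writepage(). */
static int writepage_like_caller(int outcome)
{
        int ret = phase_one(outcome);

        if (ret == 1)                   /* helper started IO and unlocked the page */
                return 0;
        if (ret < 0)                    /* error: the caller still owns the lock */
                printf("error %d: ", ret);
        printf("unlocking page\n");
        return ret < 0 ? ret : 0;
}

int main(void)
{
        writepage_like_caller(1);       /* IO already started, nothing left to do */
        writepage_like_caller(0);       /* success path, caller unlocks */
        writepage_like_caller(-5);      /* error path, caller reports then unlocks */
        return 0;
}
```
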