Commit 5069120b authored by Mark Fasheh

ocfs2: remove ocfs2_prepare_write() and ocfs2_commit_write()

These are no longer used, and can't handle file systems with sparse file
allocation.
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
Parent 9517bac6
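
Background for reading the diff below: in kernels of this era, the generic buffered write path drove two per-page address_space hooks, ->prepare_write() and ->commit_write(), which is what the removed ocfs2 functions implemented. The following is a simplified, illustrative sketch of that calling pattern, not code from this commit or from the kernel tree; the function name legacy_buffered_write is invented for the example, and the user-copy step and most error handling are omitted.

/*
 * Illustrative sketch only -- not part of this commit.  Roughly how the
 * generic buffered write path of this kernel era called the two hooks
 * that ocfs2 stops providing here.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>

static ssize_t legacy_buffered_write(struct file *file, const char __user *buf,
				     size_t count, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	ssize_t written = 0;

	while (count) {
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		unsigned from = pos & (PAGE_CACHE_SIZE - 1);
		unsigned bytes = min_t(unsigned, PAGE_CACHE_SIZE - from, count);
		struct page *page;
		int ret;

		page = grab_cache_page(mapping, index);	/* returns a locked page */
		if (!page)
			return written ? written : -ENOMEM;

		/* Hook 1: the filesystem prepares the byte range -- mapping
		 * blocks, zeroing partial heads/tails, allocating space.
		 * This page-at-a-time granularity is what cannot describe
		 * ocfs2's cluster-sized, sparse allocation decisions. */
		ret = a_ops->prepare_write(file, page, from, from + bytes);
		if (!ret) {
			/* ...copy 'bytes' bytes of user data into the page
			 * at offset 'from' (copy helpers omitted)... */

			/* Hook 2: the filesystem dirties the buffers and,
			 * for extending writes, updates i_size. */
			ret = a_ops->commit_write(file, page, from, from + bytes);
		}

		unlock_page(page);
		page_cache_release(page);
		if (ret)
			return written ? written : ret;

		buf += bytes;
		pos += bytes;
		count -= bytes;
		written += bytes;
	}

	return written;
}

Page-granular hooks like these were also what external callers (such as the loop driver mentioned in the removed comment below) relied on, which is why ocfs2_prepare_write() had to take its own cluster locks.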
@@ -277,8 +277,11 @@ static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
 	return ret;
 }
 
-/* This can also be called from ocfs2_write_zero_page() which has done
- * it's own cluster locking. */
+/*
+ * This is called from ocfs2_write_zero_page() which has handled it's
+ * own cluster locking and has ensured allocation exists for those
+ * blocks to be written.
+ */
 int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
 			       unsigned from, unsigned to)
 {
@@ -293,33 +296,6 @@ int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
 	return ret;
 }
 
-/*
- * ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called
- * from loopback. It must be able to perform its own locking around
- * ocfs2_get_block().
- */
-static int ocfs2_prepare_write(struct file *file, struct page *page,
-			       unsigned from, unsigned to)
-{
-	struct inode *inode = page->mapping->host;
-	int ret;
-
-	mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to);
-
-	ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
-	if (ret != 0) {
-		mlog_errno(ret);
-		goto out;
-	}
-
-	ret = ocfs2_prepare_write_nolock(inode, page, from, to);
-
-	ocfs2_meta_unlock(inode, 0);
-out:
-	mlog_exit(ret);
-	return ret;
-}
-
 /* Taken from ext3. We don't necessarily need the full blown
  * functionality yet, but IMHO it's better to cut and paste the whole
  * thing so we can avoid introducing our own bugs (and easily pick up
@@ -389,95 +365,6 @@ handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
 	return handle;
 }
 
-static int ocfs2_commit_write(struct file *file, struct page *page,
-			      unsigned from, unsigned to)
-{
-	int ret;
-	struct buffer_head *di_bh = NULL;
-	struct inode *inode = page->mapping->host;
-	handle_t *handle = NULL;
-	struct ocfs2_dinode *di;
-
-	mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to);
-
-	/* NOTE: ocfs2_file_aio_write has ensured that it's safe for
-	 * us to continue here without rechecking the I/O against
-	 * changed inode values.
-	 *
-	 * 1) We're currently holding the inode alloc lock, so no
-	 *    nodes can change it underneath us.
-	 *
-	 * 2) We've had to take the metadata lock at least once
-	 *    already to check for extending writes, suid removal, etc.
-	 *    The meta data update code then ensures that we don't get a
-	 *    stale inode allocation image (i_size, i_clusters, etc).
-	 */
-
-	ret = ocfs2_meta_lock_with_page(inode, &di_bh, 1, page);
-	if (ret != 0) {
-		mlog_errno(ret);
-		goto out;
-	}
-
-	ret = ocfs2_data_lock_with_page(inode, 1, page);
-	if (ret != 0) {
-		mlog_errno(ret);
-		goto out_unlock_meta;
-	}
-
-	handle = ocfs2_start_walk_page_trans(inode, page, from, to);
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		goto out_unlock_data;
-	}
-
-	/* Mark our buffer early. We'd rather catch this error up here
-	 * as opposed to after a successful commit_write which would
-	 * require us to set back inode->i_size. */
-	ret = ocfs2_journal_access(handle, inode, di_bh,
-				   OCFS2_JOURNAL_ACCESS_WRITE);
-	if (ret < 0) {
-		mlog_errno(ret);
-		goto out_commit;
-	}
-
-	/* might update i_size */
-	ret = generic_commit_write(file, page, from, to);
-	if (ret < 0) {
-		mlog_errno(ret);
-		goto out_commit;
-	}
-
-	di = (struct ocfs2_dinode *)di_bh->b_data;
-
-	/* ocfs2_mark_inode_dirty() is too heavy to use here. */
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
-	di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
-
-	inode->i_blocks = ocfs2_align_bytes_to_sectors((u64)(i_size_read(inode)));
-	di->i_size = cpu_to_le64((u64)i_size_read(inode));
-
-	ret = ocfs2_journal_dirty(handle, di_bh);
-	if (ret < 0) {
-		mlog_errno(ret);
-		goto out_commit;
-	}
-
-out_commit:
-	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
-out_unlock_data:
-	ocfs2_data_unlock(inode, 1);
-out_unlock_meta:
-	ocfs2_meta_unlock(inode, 1);
-out:
-	if (di_bh)
-		brelse(di_bh);
-
-	mlog_exit(ret);
-	return ret;
-}
-
 static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
 {
 	sector_t status;
@@ -1323,8 +1210,6 @@ ssize_t ocfs2_buffered_write_cluster(struct file *file, loff_t pos,
 const struct address_space_operations ocfs2_aops = {
 	.readpage	= ocfs2_readpage,
 	.writepage	= ocfs2_writepage,
-	.prepare_write	= ocfs2_prepare_write,
-	.commit_write	= ocfs2_commit_write,
 	.bmap		= ocfs2_bmap,
 	.sync_page	= block_sync_page,
 	.direct_IO	= ocfs2_direct_IO,
...
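
For quick reference, the net effect on the operations table as far as the last hunk shows it. This is a partial sketch assembled from the visible diff only; the callbacks hidden behind the truncation markers above are deliberately not reproduced.

const struct address_space_operations ocfs2_aops = {
	.readpage	= ocfs2_readpage,
	.writepage	= ocfs2_writepage,
	.bmap		= ocfs2_bmap,
	.sync_page	= block_sync_page,
	.direct_IO	= ocfs2_direct_IO,
	/* ...entries elided by the "..." above are not reproduced here... */
};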