Commit efc91ed0 authored by Trond Myklebust

NFS: Optimise append writes with holes

If a file is being extended, and we're creating a hole, we might as well
declare the entire page to be up to date.

This patch significantly improves the write performance for sparse files
in the case where lseek(SEEK_END) is used to append several non-contiguous
writes at intervals of < PAGE_SIZE.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Parent b390c2b5
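For context, the workload this optimisation targets looks roughly like the sketch below: each write seeks past the current end of file, leaving a hole smaller than a page before the newly written data, so consecutive writes land non-contiguously within the same page. This is only an illustrative userspace example; the file name and sizes are assumptions, not taken from the patch.

/*
 * Hypothetical sparse-append workload: repeated lseek(SEEK_END)-relative
 * seeks that leave sub-page holes between small writes.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	int i, fd = open("/mnt/nfs/sparse.dat", O_CREAT | O_WRONLY | O_TRUNC, 0644);

	if (fd < 0)
		return 1;
	memset(buf, 'x', sizeof(buf));
	for (i = 0; i < 1024; i++) {
		lseek(fd, 512, SEEK_END);	/* seek 512 bytes past EOF: a hole < PAGE_SIZE */
		write(fd, buf, sizeof(buf));	/* extend the file with a small write */
	}
	close(fd);
	return 0;
}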
@@ -344,6 +344,26 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
 	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
 	int status;
 
+	/*
+	 * Zero any uninitialised parts of the page, and then mark the page
+	 * as up to date if it turns out that we're extending the file.
+	 */
+	if (!PageUptodate(page)) {
+		unsigned pglen = nfs_page_length(page);
+		unsigned end = offset + len;
+
+		if (pglen == 0) {
+			zero_user_segments(page, 0, offset,
+					end, PAGE_CACHE_SIZE);
+			SetPageUptodate(page);
+		} else if (end >= pglen) {
+			zero_user_segment(page, end, PAGE_CACHE_SIZE);
+			if (offset == 0)
+				SetPageUptodate(page);
+		} else
+			zero_user_segment(page, pglen, PAGE_CACHE_SIZE);
+	}
+
 	lock_kernel();
 	status = nfs_updatepage(file, page, offset, copied);
 	unlock_kernel();
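The three branches added to nfs_write_end() handle a page that is not yet up to date: if nfs_page_length() reports zero valid bytes (the page lies entirely beyond the old end of file), everything around the newly written range is zeroed and the whole page can be marked up to date; if the write ends at or past the valid data, only the tail needs zeroing, and a write starting at offset 0 then covers the whole page; otherwise only the region past the file's valid data in this page is cleared. As a rough sketch of what the zeroing helpers do (a simplified illustration under my own naming, not the kernel's implementation, which also handles highmem mapping and dcache flushing): zero_user_segment(page, start, end) clears bytes [start, end) of the page, and zero_user_segments() clears two such ranges in one call.

/* Hypothetical simplified equivalent of zero_user_segment(). */
static void zero_page_range_sketch(struct page *page, unsigned start, unsigned end)
{
	void *kaddr = kmap(page);		/* map the page into kernel address space */

	memset(kaddr + start, 0, end - start);	/* clear bytes [start, end) */
	kunmap(page);
}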
@@ -616,7 +616,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 		spin_unlock(&inode->i_lock);
 		radix_tree_preload_end();
 		req = new;
-		goto zero_page;
+		goto out;
 	}
 	spin_unlock(&inode->i_lock);
@@ -649,19 +649,13 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 		req->wb_offset = offset;
 		req->wb_pgbase = offset;
 		req->wb_bytes = max(end, rqend) - req->wb_offset;
-		goto zero_page;
+		goto out;
 	}
 
 	if (end > rqend)
 		req->wb_bytes = end - req->wb_offset;
-	return req;
-zero_page:
-	/* If this page might potentially be marked as up to date,
-	 * then we need to zero any uninitalised data. */
-	if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
-			&& !PageUptodate(req->wb_page))
-		zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE);
+out:
 	return req;
 }
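With this change the zero_page label in nfs_update_request() disappears: the equivalent zeroing is now done up front in nfs_write_end(), before nfs_updatepage() is called, so the label collapses to a plain out. Reassembled from the new side of the hunks above (surrounding context elided), the tail of the function becomes:

		req->wb_offset = offset;
		req->wb_pgbase = offset;
		req->wb_bytes = max(end, rqend) - req->wb_offset;
		goto out;
	}

	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
out:
	return req;
}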